re PR target/81959 (PowerPC __float128 optimization fails with integer PRE_INC addresses)
[official-gcc.git] / gcc / config / rs6000 / rs6000.c
/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;
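
/* Informal note (not verified against rs6000_stack_info in detail):
   total_size is roughly vars_size + parm_size + save_size + fixed_size
   rounded up to the ABI stack alignment, with the *_offset fields locating
   each save area relative to the incoming stack pointer.  */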
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;
/* Label number of the label created for -mrelocatable, to branch to so we
   can get the address of the GOT section.  */
static int rs6000_pic_labelno;
#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;	/* option name */
  unsigned int mask;	/* mask bits to set */
} recip_options[] = {
  { "all",	(RECIP_ALL) },
  { "none",	(RECIP_NONE) },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT		= 0,		/* default clone.  */
  CLONE_ISA_2_05,			/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,			/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,			/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,			/* ISA 3.00 (power9).  */
  CLONE_MAX
};
/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};
static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				""	    },	/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
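
/* Usage sketch (user code, not this file): this table backs the
   target_clones attribute, e.g.

       __attribute__ ((target_clones ("cpu=power9,default")))
       long foo (long a, long b) { return a + b; }

   where the generated ifunc resolver picks the highest clone whose HWCAP
   name above is reported by __builtin_cpu_supports.  */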
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};
/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};
/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;	/* Register class name.  */
  int reg;		/* Register in the register class.  */
};
static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
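
/* For example, a mode whose GPR loads support reg+offset, reg+reg, and
   update (PRE_INC/PRE_DEC) addressing would carry
   (RELOAD_REG_VALID | RELOAD_REG_OFFSET | RELOAD_REG_INDEXED
    | RELOAD_REG_PRE_INCDEC) in addr_mask[RELOAD_REG_GPR] below.  */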
/* Masks of valid addressing modes, and the reload insns to use, for each
   register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
					/* INSNs for fusing addi with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
					/* INSNs for fusing addis with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
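
/* Background sketch (informative only): PRE_INC/PRE_DEC correspond to RTL
   addresses such as (mem (pre_inc (reg))) and map onto the PowerPC
   load/store-with-update instructions (e.g. lwzu, stwu), while PRE_MODIFY
   covers (mem (pre_modify (reg) (plus (reg) ...))) update forms.  Both
   predicates test the ANY mask, so they report true if *any* register class
   can do the update form for MODE.  */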
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
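
/* Note: this wrapper is intended for use in scheduler bypass guards (e.g.
   the define_bypass conditions in the power*.md pipeline descriptions),
   where the generic store_data_bypass_p would otherwise assert on the
   patterns rejected above.  */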
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}
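
/* Informative note (assumes ISA 3.0): this distinguishes power9, where
   instructions such as lxsd/stxsd and lxssp/stxssp give the Altivec
   registers reg+offset addressing, from earlier cpus where VMX loads and
   stores were indexed (reg+reg) only.  */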
/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
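
/* Informative note (assumes ISA 3.0): the instructions behind
   RELOAD_REG_QUAD_OFFSET are the DQ-form lxv/stxv, whose displacement field
   is scaled so that only multiples of 16 are encodable; hence the 16-byte
   alignment restriction mentioned above.  */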
/* Processor costs (relative to an add) */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,	/* mulsi */
  COSTS_N_INSNS (6/2),		/* mulsi_const */
  COSTS_N_INSNS (6/2),		/* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,	/* muldi */
  COSTS_N_INSNS (38/2),		/* divsi */
  COSTS_N_INSNS (70/2),		/* divdi */
  COSTS_N_INSNS (10/2),		/* fp */
  COSTS_N_INSNS (10/2),		/* dmul */
  COSTS_N_INSNS (74/2),		/* sdiv */
  COSTS_N_INSNS (74/2),		/* ddiv */
  128,				/* cache line size */
  32,				/* l1 cache */
  512,				/* l2 cache */
  6,				/* streams */
  0,				/* SF->DF convert */
};
/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};
/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X
#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },
struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};
static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
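
/* Note on the pattern above: rs6000-builtin.def is an X-macro file.  Each
   builtin is described once in the .def file, and different consumers
   redefine the RS6000_BUILTIN_* macros before including it; here every
   variant expands to the same initializer, so the table gets one
   { name, icode, mask, attr } entry per builtin.  */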
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;
/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",	 1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",	 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,	 0, 0, false, false, false, NULL, false }
};
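
/* Usage sketch (user code, not this file): these attributes are spelled in
   source as, e.g.,

       void far_away (void) __attribute__ ((longcall));
       int __attribute__ ((altivec (vector__))) v4[4];

   where "longcall" forces an indirect call sequence and "altivec" is the
   internal spelling used to implement the AltiVec vector keyword.  */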
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
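
/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0)
   and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001 (%v31),
   matching the architected VRSAVE layout.  */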
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
1701 #undef TARGET_BUILTIN_DECL
1702 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1704 #undef TARGET_FOLD_BUILTIN
1705 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1706 #undef TARGET_GIMPLE_FOLD_BUILTIN
1707 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1709 #undef TARGET_EXPAND_BUILTIN
1710 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1712 #undef TARGET_MANGLE_TYPE
1713 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1715 #undef TARGET_INIT_LIBFUNCS
1716 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1718 #if TARGET_MACHO
1719 #undef TARGET_BINDS_LOCAL_P
1720 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1721 #endif
1723 #undef TARGET_MS_BITFIELD_LAYOUT_P
1724 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1726 #undef TARGET_ASM_OUTPUT_MI_THUNK
1727 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1729 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1730 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1732 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1733 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1735 #undef TARGET_REGISTER_MOVE_COST
1736 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1737 #undef TARGET_MEMORY_MOVE_COST
1738 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1739 #undef TARGET_CANNOT_COPY_INSN_P
1740 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1741 #undef TARGET_RTX_COSTS
1742 #define TARGET_RTX_COSTS rs6000_rtx_costs
1743 #undef TARGET_ADDRESS_COST
1744 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1745 #undef TARGET_INSN_COST
1746 #define TARGET_INSN_COST rs6000_insn_cost
1748 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1749 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1751 #undef TARGET_PROMOTE_FUNCTION_MODE
1752 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1754 #undef TARGET_RETURN_IN_MEMORY
1755 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1757 #undef TARGET_RETURN_IN_MSB
1758 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1760 #undef TARGET_SETUP_INCOMING_VARARGS
1761 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1763 /* Always strict argument naming on rs6000. */
1764 #undef TARGET_STRICT_ARGUMENT_NAMING
1765 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1766 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1767 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1768 #undef TARGET_SPLIT_COMPLEX_ARG
1769 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1770 #undef TARGET_MUST_PASS_IN_STACK
1771 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1772 #undef TARGET_PASS_BY_REFERENCE
1773 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1774 #undef TARGET_ARG_PARTIAL_BYTES
1775 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1776 #undef TARGET_FUNCTION_ARG_ADVANCE
1777 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1778 #undef TARGET_FUNCTION_ARG
1779 #define TARGET_FUNCTION_ARG rs6000_function_arg
1780 #undef TARGET_FUNCTION_ARG_PADDING
1781 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1782 #undef TARGET_FUNCTION_ARG_BOUNDARY
1783 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1785 #undef TARGET_BUILD_BUILTIN_VA_LIST
1786 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1788 #undef TARGET_EXPAND_BUILTIN_VA_START
1789 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1791 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1792 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1794 #undef TARGET_EH_RETURN_FILTER_MODE
1795 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1797 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1798 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1800 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1801 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1803 #undef TARGET_FLOATN_MODE
1804 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1806 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1807 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1809 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1810 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1812 #undef TARGET_MD_ASM_ADJUST
1813 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1815 #undef TARGET_OPTION_OVERRIDE
1816 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1818 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1819 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1820 rs6000_builtin_vectorized_function
1822 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1823 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1824 rs6000_builtin_md_vectorized_function
1826 #undef TARGET_STACK_PROTECT_GUARD
1827 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1829 #if !TARGET_MACHO
1830 #undef TARGET_STACK_PROTECT_FAIL
1831 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1832 #endif
1834 #ifdef HAVE_AS_TLS
1835 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1836 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1837 #endif
1839 /* Use a 32-bit anchor range. This leads to sequences like:
1841 addis tmp,anchor,high
1842 add dest,tmp,low
1844 where tmp itself acts as an anchor, and can be shared between
1845 accesses to the same 64k page. */
1846 #undef TARGET_MIN_ANCHOR_OFFSET
1847 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1848 #undef TARGET_MAX_ANCHOR_OFFSET
1849 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
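/* Editorial sketch (assumed example, not from this file): with section
   anchors in this range, two nearby statics can share one base register:

     static int a, b;
     int sum (void) { return a + b; }

   Both loads can then use small offsets from the single "addis"-built
   anchor shown in the sequence above.  */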
1850 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1851 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1852 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1853 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1855 #undef TARGET_BUILTIN_RECIPROCAL
1856 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1858 #undef TARGET_SECONDARY_RELOAD
1859 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1860 #undef TARGET_SECONDARY_MEMORY_NEEDED
1861 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1862 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1863 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1865 #undef TARGET_LEGITIMATE_ADDRESS_P
1866 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1868 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1869 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1871 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1872 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1874 #undef TARGET_CAN_ELIMINATE
1875 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1877 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1878 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1880 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1881 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1883 #undef TARGET_TRAMPOLINE_INIT
1884 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1886 #undef TARGET_FUNCTION_VALUE
1887 #define TARGET_FUNCTION_VALUE rs6000_function_value
1889 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1890 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1892 #undef TARGET_OPTION_SAVE
1893 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1895 #undef TARGET_OPTION_RESTORE
1896 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1898 #undef TARGET_OPTION_PRINT
1899 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1901 #undef TARGET_CAN_INLINE_P
1902 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1904 #undef TARGET_SET_CURRENT_FUNCTION
1905 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1907 #undef TARGET_LEGITIMATE_CONSTANT_P
1908 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1910 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1911 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1913 #undef TARGET_CAN_USE_DOLOOP_P
1914 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1916 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1917 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1919 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1920 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1921 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1922 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1923 #undef TARGET_UNWIND_WORD_MODE
1924 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1926 #undef TARGET_OFFLOAD_OPTIONS
1927 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1929 #undef TARGET_C_MODE_FOR_SUFFIX
1930 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1932 #undef TARGET_INVALID_BINARY_OP
1933 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1935 #undef TARGET_OPTAB_SUPPORTED_P
1936 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1938 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1939 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1941 #undef TARGET_COMPARE_VERSION_PRIORITY
1942 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1944 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1945 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1946 rs6000_generate_version_dispatcher_body
1948 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1949 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1950 rs6000_get_function_versions_dispatcher
1952 #undef TARGET_OPTION_FUNCTION_VERSIONS
1953 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1955 #undef TARGET_HARD_REGNO_NREGS
1956 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1957 #undef TARGET_HARD_REGNO_MODE_OK
1958 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1960 #undef TARGET_MODES_TIEABLE_P
1961 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1963 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1964 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1965 rs6000_hard_regno_call_part_clobbered
1967 #undef TARGET_SLOW_UNALIGNED_ACCESS
1968 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1970 #undef TARGET_CAN_CHANGE_MODE_CLASS
1971 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1973 #undef TARGET_CONSTANT_ALIGNMENT
1974 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1976 #undef TARGET_STARTING_FRAME_OFFSET
1977 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1980 /* Processor table. */
1981 struct rs6000_ptt
1983 const char *const name; /* Canonical processor name. */
1984 const enum processor_type processor; /* Processor type enum value. */
1985 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1988 static struct rs6000_ptt const processor_target_table[] =
1990 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1991 #include "rs6000-cpus.def"
1992 #undef RS6000_CPU
1995 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1996 name is invalid. */
1998 static int
1999 rs6000_cpu_name_lookup (const char *name)
2001 size_t i;
2003 if (name != NULL)
2005 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2006 if (! strcmp (name, processor_target_table[i].name))
2007 return (int)i;
2010 return -1;
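/* Editorial usage sketch (hypothetical values): rs6000_cpu_name_lookup
   ("power8") returns that entry's index into processor_target_table,
   while rs6000_cpu_name_lookup ("bogus") returns -1, letting callers
   diagnose an invalid -mcpu=/-mtune= name.  */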
2014 /* Return number of consecutive hard regs needed starting at reg REGNO
2015 to hold something of mode MODE.
2016 This is ordinarily the length in words of a value of mode MODE
2017 but can be less for certain modes in special long registers.
2019 POWER and PowerPC GPRs hold 32 bits worth;
2021 	 PowerPC64 GPRs and FPRs hold 64 bits worth.  */
2022 static int
2023 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2025 unsigned HOST_WIDE_INT reg_size;
2027 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2028 128-bit floating point that can go in vector registers, which has VSX
2029 memory addressing. */
2030 if (FP_REGNO_P (regno))
2031 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2032 ? UNITS_PER_VSX_WORD
2033 : UNITS_PER_FP_WORD);
2035 else if (ALTIVEC_REGNO_P (regno))
2036 reg_size = UNITS_PER_ALTIVEC_WORD;
2038 else
2039 reg_size = UNITS_PER_WORD;
2041 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
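/* Editorial worked example: the return expression is a ceiling division,
   rounding up to whole registers.  A 16-byte IEEE 128-bit value in a VSX
   register (reg_size == 16) needs (16 + 16 - 1) / 16 == 1 register; the
   same 16 bytes in 8-byte FPRs would need (16 + 8 - 1) / 8 == 2.  */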
2044 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2045 MODE. */
2046 static int
2047 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2049 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2051 if (COMPLEX_MODE_P (mode))
2052 mode = GET_MODE_INNER (mode);
2054   /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
2055      register combinations, and we use PTImode where we need to deal with such
2056      operations.  Don't allow quad words in the argument or frame
2057      pointer registers, just registers 0..31.  */
2058 if (mode == PTImode)
2059 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2060 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2061 && ((regno & 1) == 0));
2063 /* VSX registers that overlap the FPR registers are larger than for non-VSX
2064 implementations. Don't allow an item to be split between a FP register
2065 and an Altivec register. Allow TImode in all VSX registers if the user
2066 asked for it. */
2067 if (TARGET_VSX && VSX_REGNO_P (regno)
2068 && (VECTOR_MEM_VSX_P (mode)
2069 || FLOAT128_VECTOR_P (mode)
2070 || reg_addr[mode].scalar_in_vmx_p
2071 || mode == TImode
2072 || (TARGET_VADDUQM && mode == V1TImode)))
2074 if (FP_REGNO_P (regno))
2075 return FP_REGNO_P (last_regno);
2077 if (ALTIVEC_REGNO_P (regno))
2079 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2080 return 0;
2082 return ALTIVEC_REGNO_P (last_regno);
2086 /* The GPRs can hold any mode, but values bigger than one register
2087 cannot go past R31. */
2088 if (INT_REGNO_P (regno))
2089 return INT_REGNO_P (last_regno);
2091 /* The float registers (except for VSX vector modes) can only hold floating
2092 modes and DImode. */
2093 if (FP_REGNO_P (regno))
2095 if (FLOAT128_VECTOR_P (mode))
2096 return false;
2098 if (SCALAR_FLOAT_MODE_P (mode)
2099 && (mode != TDmode || (regno % 2) == 0)
2100 && FP_REGNO_P (last_regno))
2101 return 1;
2103 if (GET_MODE_CLASS (mode) == MODE_INT)
2105 	  if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2106 return 1;
2108 if (TARGET_P8_VECTOR && (mode == SImode))
2109 return 1;
2111 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2112 return 1;
2115 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
2116 && PAIRED_VECTOR_MODE (mode))
2117 return 1;
2119 return 0;
2122 /* The CR register can only hold CC modes. */
2123 if (CR_REGNO_P (regno))
2124 return GET_MODE_CLASS (mode) == MODE_CC;
2126 if (CA_REGNO_P (regno))
2127 return mode == Pmode || mode == SImode;
2129   /* AltiVec modes can go only in AltiVec registers.  */
2130 if (ALTIVEC_REGNO_P (regno))
2131 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2132 || mode == V1TImode);
2134   /* We cannot put non-VSX TImode or PTImode anywhere except general registers,
2135      and it must be able to fit within the register set.  */
2137 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
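/* Editorial examples of the checks above: PTImode starting at r4 is
   accepted (even/odd pair r4/r5), but PTImode starting at r3 fails the
   (regno & 1) test; similarly TDmode is restricted to even-numbered
   FPRs by the (regno % 2) test.  */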
2140 /* Implement TARGET_HARD_REGNO_NREGS. */
2142 static unsigned int
2143 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2145 return rs6000_hard_regno_nregs[mode][regno];
2148 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2150 static bool
2151 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2153 return rs6000_hard_regno_mode_ok_p[mode][regno];
2156 /* Implement TARGET_MODES_TIEABLE_P.
2158 PTImode cannot tie with other modes because PTImode is restricted to even
2159 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2160 57744).
2162 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2163 128-bit floating point on VSX systems ties with other vectors. */
2165 static bool
2166 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2168 if (mode1 == PTImode)
2169 return mode2 == PTImode;
2170 if (mode2 == PTImode)
2171 return false;
2173 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2174 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2175 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2176 return false;
2178 if (SCALAR_FLOAT_MODE_P (mode1))
2179 return SCALAR_FLOAT_MODE_P (mode2);
2180 if (SCALAR_FLOAT_MODE_P (mode2))
2181 return false;
2183 if (GET_MODE_CLASS (mode1) == MODE_CC)
2184 return GET_MODE_CLASS (mode2) == MODE_CC;
2185 if (GET_MODE_CLASS (mode2) == MODE_CC)
2186 return false;
2188 if (PAIRED_VECTOR_MODE (mode1))
2189 return PAIRED_VECTOR_MODE (mode2);
2190 if (PAIRED_VECTOR_MODE (mode2))
2191 return false;
2193 return true;
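/* Editorial examples: SFmode and DFmode tie (both scalar float modes),
   V4SImode and V2DFmode tie (both AltiVec/VSX vector modes), but TImode
   and PTImode never tie because PTImode is limited to even GPR pairs.  */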
2196 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2198 static bool
2199 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2201 if (TARGET_32BIT
2202 && TARGET_POWERPC64
2203 && GET_MODE_SIZE (mode) > 4
2204 && INT_REGNO_P (regno))
2205 return true;
2207 if (TARGET_VSX
2208 && FP_REGNO_P (regno)
2209 && GET_MODE_SIZE (mode) > 8
2210 && !FLOAT128_2REG_P (mode))
2211 return true;
2213 return false;
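/* Editorial example: on a 32-bit ABI with -mpowerpc64, a DImode value
   occupies a full 64-bit GPR, but calls only preserve the low 32 bits,
   so the first test reports the register as partially clobbered.  */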
2216 /* Print interesting facts about registers. */
2217 static void
2218 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2220 int r, m;
2222 for (r = first_regno; r <= last_regno; ++r)
2224 const char *comma = "";
2225 int len;
2227 if (first_regno == last_regno)
2228 fprintf (stderr, "%s:\t", reg_name);
2229 else
2230 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2232 len = 8;
2233 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2234 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2236 if (len > 70)
2238 fprintf (stderr, ",\n\t");
2239 len = 8;
2240 comma = "";
2243 if (rs6000_hard_regno_nregs[m][r] > 1)
2244 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2245 rs6000_hard_regno_nregs[m][r]);
2246 else
2247 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2249 comma = ", ";
2252 if (call_used_regs[r])
2254 if (len > 70)
2256 fprintf (stderr, ",\n\t");
2257 len = 8;
2258 comma = "";
2261 len += fprintf (stderr, "%s%s", comma, "call-used");
2262 comma = ", ";
2265 if (fixed_regs[r])
2267 if (len > 70)
2269 fprintf (stderr, ",\n\t");
2270 len = 8;
2271 comma = "";
2274 len += fprintf (stderr, "%s%s", comma, "fixed");
2275 comma = ", ";
2278 if (len > 70)
2280 fprintf (stderr, ",\n\t");
2281 comma = "";
2284 len += fprintf (stderr, "%sreg-class = %s", comma,
2285 reg_class_names[(int)rs6000_regno_regclass[r]]);
2286 comma = ", ";
2288 if (len > 70)
2290 fprintf (stderr, ",\n\t");
2291 comma = "";
2294 fprintf (stderr, "%sregno = %d\n", comma, r);
2298 static const char *
2299 rs6000_debug_vector_unit (enum rs6000_vector v)
2301 const char *ret;
2303 switch (v)
2305 case VECTOR_NONE: ret = "none"; break;
2306 case VECTOR_ALTIVEC: ret = "altivec"; break;
2307 case VECTOR_VSX: ret = "vsx"; break;
2308 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2309 case VECTOR_PAIRED: ret = "paired"; break;
2310 case VECTOR_OTHER: ret = "other"; break;
2311 default: ret = "unknown"; break;
2314 return ret;
2317 /* Inner function printing just the address mask for a particular reload
2318 register class. */
2319 DEBUG_FUNCTION char *
2320 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2322 static char ret[8];
2323 char *p = ret;
2325 if ((mask & RELOAD_REG_VALID) != 0)
2326 *p++ = 'v';
2327 else if (keep_spaces)
2328 *p++ = ' ';
2330 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2331 *p++ = 'm';
2332 else if (keep_spaces)
2333 *p++ = ' ';
2335 if ((mask & RELOAD_REG_INDEXED) != 0)
2336 *p++ = 'i';
2337 else if (keep_spaces)
2338 *p++ = ' ';
2340 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2341 *p++ = 'O';
2342 else if ((mask & RELOAD_REG_OFFSET) != 0)
2343 *p++ = 'o';
2344 else if (keep_spaces)
2345 *p++ = ' ';
2347 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2348 *p++ = '+';
2349 else if (keep_spaces)
2350 *p++ = ' ';
2352 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2353 *p++ = '+';
2354 else if (keep_spaces)
2355 *p++ = ' ';
2357 if ((mask & RELOAD_REG_AND_M16) != 0)
2358 *p++ = '&';
2359 else if (keep_spaces)
2360 *p++ = ' ';
2362 *p = '\0';
2364 return ret;
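/* Editorial sketch of the output: with keep_spaces set, a GPR mask that
   is valid, indexed, offsettable, and allows the update forms prints as
   "v io++ ", with blank columns for the missing RELOAD_REG_MULTIPLE and
   RELOAD_REG_AND_M16 bits.  */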
2367 /* Print the address masks in a human readable fashion.  */
2368 DEBUG_FUNCTION void
2369 rs6000_debug_print_mode (ssize_t m)
2371 ssize_t rc;
2372 int spaces = 0;
2373 bool fuse_extra_p;
2375 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2376 for (rc = 0; rc < N_RELOAD_REG; rc++)
2377 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2378 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2380 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2381 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2382 fprintf (stderr, " Reload=%c%c",
2383 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2384 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2385 else
2386 spaces += sizeof (" Reload=sl") - 1;
2388 if (reg_addr[m].scalar_in_vmx_p)
2390 fprintf (stderr, "%*s Upper=y", spaces, "");
2391 spaces = 0;
2393 else
2394 spaces += sizeof (" Upper=y") - 1;
2396 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2397 || reg_addr[m].fused_toc);
2398 if (!fuse_extra_p)
2400 for (rc = 0; rc < N_RELOAD_REG; rc++)
2402 if (rc != RELOAD_REG_ANY)
2404 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2406 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2407 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2408 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2410 fuse_extra_p = true;
2411 break;
2417 if (fuse_extra_p)
2419 fprintf (stderr, "%*s Fuse:", spaces, "");
2420 spaces = 0;
2422 for (rc = 0; rc < N_RELOAD_REG; rc++)
2424 if (rc != RELOAD_REG_ANY)
2426 char load, store;
2428 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2429 load = 'l';
2430 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2431 load = 'L';
2432 else
2433 load = '-';
2435 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2436 store = 's';
2437 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2438 store = 'S';
2439 else
2440 store = '-';
2442 if (load == '-' && store == '-')
2443 spaces += 5;
2444 else
2446 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2447 reload_reg_map[rc].name[0], load, store);
2448 spaces = 0;
2453 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2455 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2456 spaces = 0;
2458 else
2459 spaces += sizeof (" P8gpr") - 1;
2461 if (reg_addr[m].fused_toc)
2463 fprintf (stderr, "%*sToc", (spaces + 1), "");
2464 spaces = 0;
2466 else
2467 spaces += sizeof (" Toc") - 1;
2469 else
2470 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2472 if (rs6000_vector_unit[m] != VECTOR_NONE
2473 || rs6000_vector_mem[m] != VECTOR_NONE)
2475 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2476 spaces, "",
2477 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2478 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2481 fputs ("\n", stderr);
2484 #define DEBUG_FMT_ID "%-32s= "
2485 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2486 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2487 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
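/* Editorial example: fprintf (stderr, DEBUG_FMT_S, "abi", "aix") uses the
   format "%-32s= %s\n", printing the label left-justified in a 32-column
   field followed by "= aix".  */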
2489 /* Print various interesting information with -mdebug=reg. */
2490 static void
2491 rs6000_debug_reg_global (void)
2493 static const char *const tf[2] = { "false", "true" };
2494 const char *nl = (const char *)0;
2495 int m;
2496 size_t m1, m2, v;
2497 char costly_num[20];
2498 char nop_num[20];
2499 char flags_buffer[40];
2500 const char *costly_str;
2501 const char *nop_str;
2502 const char *trace_str;
2503 const char *abi_str;
2504 const char *cmodel_str;
2505 struct cl_target_option cl_opts;
2507 /* Modes we want tieable information on. */
2508 static const machine_mode print_tieable_modes[] = {
2509 QImode,
2510 HImode,
2511 SImode,
2512 DImode,
2513 TImode,
2514 PTImode,
2515 SFmode,
2516 DFmode,
2517 TFmode,
2518 IFmode,
2519 KFmode,
2520 SDmode,
2521 DDmode,
2522 TDmode,
2523 V2SImode,
2524 V16QImode,
2525 V8HImode,
2526 V4SImode,
2527 V2DImode,
2528 V1TImode,
2529 V32QImode,
2530 V16HImode,
2531 V8SImode,
2532 V4DImode,
2533 V2TImode,
2534 V2SFmode,
2535 V4SFmode,
2536 V2DFmode,
2537 V8SFmode,
2538 V4DFmode,
2539 CCmode,
2540 CCUNSmode,
2541 CCEQmode,
2544 /* Virtual regs we are interested in. */
2545   static const struct {
2546 int regno; /* register number. */
2547 const char *name; /* register name. */
2548 } virtual_regs[] = {
2549 { STACK_POINTER_REGNUM, "stack pointer:" },
2550 { TOC_REGNUM, "toc: " },
2551 { STATIC_CHAIN_REGNUM, "static chain: " },
2552 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2553 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2554 { ARG_POINTER_REGNUM, "arg pointer: " },
2555 { FRAME_POINTER_REGNUM, "frame pointer:" },
2556 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2557 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2558 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2559 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2560 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2561 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2562 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2563     { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2564 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2567 fputs ("\nHard register information:\n", stderr);
2568 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2569 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2570 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2571 LAST_ALTIVEC_REGNO,
2572 "vs");
2573 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2574 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2575 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2576 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2577 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2578 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2580 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2581 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2582 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2584 fprintf (stderr,
2585 "\n"
2586 "d reg_class = %s\n"
2587 "f reg_class = %s\n"
2588 "v reg_class = %s\n"
2589 "wa reg_class = %s\n"
2590 "wb reg_class = %s\n"
2591 "wd reg_class = %s\n"
2592 "we reg_class = %s\n"
2593 "wf reg_class = %s\n"
2594 "wg reg_class = %s\n"
2595 "wh reg_class = %s\n"
2596 "wi reg_class = %s\n"
2597 "wj reg_class = %s\n"
2598 "wk reg_class = %s\n"
2599 "wl reg_class = %s\n"
2600 "wm reg_class = %s\n"
2601 "wo reg_class = %s\n"
2602 "wp reg_class = %s\n"
2603 "wq reg_class = %s\n"
2604 "wr reg_class = %s\n"
2605 "ws reg_class = %s\n"
2606 "wt reg_class = %s\n"
2607 "wu reg_class = %s\n"
2608 "wv reg_class = %s\n"
2609 "ww reg_class = %s\n"
2610 "wx reg_class = %s\n"
2611 "wy reg_class = %s\n"
2612 "wz reg_class = %s\n"
2613 "wA reg_class = %s\n"
2614 "wH reg_class = %s\n"
2615 "wI reg_class = %s\n"
2616 "wJ reg_class = %s\n"
2617 "wK reg_class = %s\n"
2618 "\n",
2619 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2620 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2621 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2622 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2623 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2624 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2625 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2626 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2627 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2628 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2629 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2630 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2631 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2632 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2633 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2634 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2635 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2636 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2637 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2638 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2639 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2640 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2641 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2642 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2643 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2644 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2645 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2646 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2647 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2648 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2649 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2650 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2652 nl = "\n";
2653 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2654 rs6000_debug_print_mode (m);
2656 fputs ("\n", stderr);
2658 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2660 machine_mode mode1 = print_tieable_modes[m1];
2661 bool first_time = true;
2663 nl = (const char *)0;
2664 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2666 machine_mode mode2 = print_tieable_modes[m2];
2667 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2669 if (first_time)
2671 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2672 nl = "\n";
2673 first_time = false;
2676 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2680 if (!first_time)
2681 fputs ("\n", stderr);
2684 if (nl)
2685 fputs (nl, stderr);
2687 if (rs6000_recip_control)
2689 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2691 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2692 if (rs6000_recip_bits[m])
2694 fprintf (stderr,
2695 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2696 GET_MODE_NAME (m),
2697 (RS6000_RECIP_AUTO_RE_P (m)
2698 ? "auto"
2699 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2700 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2701 ? "auto"
2702 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2705 fputs ("\n", stderr);
2708 if (rs6000_cpu_index >= 0)
2710 const char *name = processor_target_table[rs6000_cpu_index].name;
2711 HOST_WIDE_INT flags
2712 = processor_target_table[rs6000_cpu_index].target_enable;
2714 sprintf (flags_buffer, "-mcpu=%s flags", name);
2715 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2717 else
2718 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2720 if (rs6000_tune_index >= 0)
2722 const char *name = processor_target_table[rs6000_tune_index].name;
2723 HOST_WIDE_INT flags
2724 = processor_target_table[rs6000_tune_index].target_enable;
2726 sprintf (flags_buffer, "-mtune=%s flags", name);
2727 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2729 else
2730 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2732 cl_target_option_save (&cl_opts, &global_options);
2733 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2734 rs6000_isa_flags);
2736 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2737 rs6000_isa_flags_explicit);
2739 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2740 rs6000_builtin_mask);
2742 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2744 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2745 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2747 switch (rs6000_sched_costly_dep)
2749 case max_dep_latency:
2750 costly_str = "max_dep_latency";
2751 break;
2753 case no_dep_costly:
2754 costly_str = "no_dep_costly";
2755 break;
2757 case all_deps_costly:
2758 costly_str = "all_deps_costly";
2759 break;
2761 case true_store_to_load_dep_costly:
2762 costly_str = "true_store_to_load_dep_costly";
2763 break;
2765 case store_to_load_dep_costly:
2766 costly_str = "store_to_load_dep_costly";
2767 break;
2769 default:
2770 costly_str = costly_num;
2771 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2772 break;
2775 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2777 switch (rs6000_sched_insert_nops)
2779 case sched_finish_regroup_exact:
2780 nop_str = "sched_finish_regroup_exact";
2781 break;
2783 case sched_finish_pad_groups:
2784 nop_str = "sched_finish_pad_groups";
2785 break;
2787 case sched_finish_none:
2788 nop_str = "sched_finish_none";
2789 break;
2791 default:
2792 nop_str = nop_num;
2793 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2794 break;
2797 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2799 switch (rs6000_sdata)
2801 default:
2802 case SDATA_NONE:
2803 break;
2805 case SDATA_DATA:
2806 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2807 break;
2809 case SDATA_SYSV:
2810 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2811 break;
2813 case SDATA_EABI:
2814 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2815 break;
2819 switch (rs6000_traceback)
2821 case traceback_default: trace_str = "default"; break;
2822 case traceback_none: trace_str = "none"; break;
2823 case traceback_part: trace_str = "part"; break;
2824 case traceback_full: trace_str = "full"; break;
2825 default: trace_str = "unknown"; break;
2828 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2830 switch (rs6000_current_cmodel)
2832 case CMODEL_SMALL: cmodel_str = "small"; break;
2833 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2834 case CMODEL_LARGE: cmodel_str = "large"; break;
2835 default: cmodel_str = "unknown"; break;
2838 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2840 switch (rs6000_current_abi)
2842 case ABI_NONE: abi_str = "none"; break;
2843 case ABI_AIX: abi_str = "aix"; break;
2844 case ABI_ELFv2: abi_str = "ELFv2"; break;
2845 case ABI_V4: abi_str = "V4"; break;
2846 case ABI_DARWIN: abi_str = "darwin"; break;
2847 default: abi_str = "unknown"; break;
2850 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2852 if (rs6000_altivec_abi)
2853 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2855 if (rs6000_darwin64_abi)
2856 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2858 fprintf (stderr, DEBUG_FMT_S, "single_float",
2859 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2861 fprintf (stderr, DEBUG_FMT_S, "double_float",
2862 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2864 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2865 (TARGET_SOFT_FLOAT ? "true" : "false"));
2867 if (TARGET_LINK_STACK)
2868 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2870 if (TARGET_P8_FUSION)
2872 char options[80];
2874 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2875 if (TARGET_TOC_FUSION)
2876 strcat (options, ", toc");
2878 if (TARGET_P8_FUSION_SIGN)
2879 strcat (options, ", sign");
2881 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2884 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2885 TARGET_SECURE_PLT ? "secure" : "bss");
2886 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2887 aix_struct_return ? "aix" : "sysv");
2888 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2889 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2890 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2891 tf[!!rs6000_align_branch_targets]);
2892 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2893 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2894 rs6000_long_double_type_size);
2895 if (rs6000_long_double_type_size == 128)
2897 fprintf (stderr, DEBUG_FMT_S, "long double type",
2898 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2899 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2900 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2902 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2903 (int)rs6000_sched_restricted_insns_priority);
2904 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2905 (int)END_BUILTINS);
2906 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2907 (int)RS6000_BUILTIN_COUNT);
2909 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2910 (int)TARGET_FLOAT128_ENABLE_TYPE);
2912 if (TARGET_VSX)
2913 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2914 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2916 if (TARGET_DIRECT_MOVE_128)
2917 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2918 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2922 /* Update the addr mask bits in reg_addr to help secondary reload and the
2923    legitimate address support figure out the appropriate addressing to
2924    use.  */
2926 static void
2927 rs6000_setup_reg_addr_masks (void)
2929 ssize_t rc, reg, m, nregs;
2930 addr_mask_type any_addr_mask, addr_mask;
2932 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2934 machine_mode m2 = (machine_mode) m;
2935 bool complex_p = false;
2936 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2937 size_t msize;
2939 if (COMPLEX_MODE_P (m2))
2941 complex_p = true;
2942 m2 = GET_MODE_INNER (m2);
2945 msize = GET_MODE_SIZE (m2);
2947 /* SDmode is special in that we want to access it only via REG+REG
2948 addressing on power7 and above, since we want to use the LFIWZX and
2949 STFIWZX instructions to load it. */
2950 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2952 any_addr_mask = 0;
2953 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2955 addr_mask = 0;
2956 reg = reload_reg_map[rc].reg;
2958 /* Can mode values go in the GPR/FPR/Altivec registers? */
2959 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2961 bool small_int_vsx_p = (small_int_p
2962 && (rc == RELOAD_REG_FPR
2963 || rc == RELOAD_REG_VMX));
2965 nregs = rs6000_hard_regno_nregs[m][reg];
2966 addr_mask |= RELOAD_REG_VALID;
2968 /* Indicate if the mode takes more than 1 physical register. If
2969 it takes a single register, indicate it can do REG+REG
2970 addressing. Small integers in VSX registers can only do
2971 REG+REG addressing. */
2972 if (small_int_vsx_p)
2973 addr_mask |= RELOAD_REG_INDEXED;
2974 else if (nregs > 1 || m == BLKmode || complex_p)
2975 addr_mask |= RELOAD_REG_MULTIPLE;
2976 else
2977 addr_mask |= RELOAD_REG_INDEXED;
2979 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2980 addressing. If we allow scalars into Altivec registers,
2981 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2983 if (TARGET_UPDATE
2984 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2985 && msize <= 8
2986 && !VECTOR_MODE_P (m2)
2987 && !FLOAT128_VECTOR_P (m2)
2988 && !complex_p
2989 && !small_int_vsx_p)
2991 addr_mask |= RELOAD_REG_PRE_INCDEC;
2993 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2994 we don't allow PRE_MODIFY for some multi-register
2995 operations. */
2996 switch (m)
2998 default:
2999 addr_mask |= RELOAD_REG_PRE_MODIFY;
3000 break;
3002 case E_DImode:
3003 if (TARGET_POWERPC64)
3004 addr_mask |= RELOAD_REG_PRE_MODIFY;
3005 break;
3007 case E_DFmode:
3008 case E_DDmode:
3009 if (TARGET_DF_INSN)
3010 addr_mask |= RELOAD_REG_PRE_MODIFY;
3011 break;
3016 /* GPR and FPR registers can do REG+OFFSET addressing, except
3017 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
3018 for 64-bit scalars and 32-bit SFmode to altivec registers. */
3019 if ((addr_mask != 0) && !indexed_only_p
3020 && msize <= 8
3021 && (rc == RELOAD_REG_GPR
3022 || ((msize == 8 || m2 == SFmode)
3023 && (rc == RELOAD_REG_FPR
3024 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
3025 addr_mask |= RELOAD_REG_OFFSET;
3027 	      /* VSX registers can do REG+OFFSET addressing if ISA 3.0
3028 		 instructions are enabled.  The offset for 128-bit VSX registers is
3029 		 only 12 bits.  While GPRs can handle the full offset range, VSX
3030 registers can only handle the restricted range. */
3031 else if ((addr_mask != 0) && !indexed_only_p
3032 && msize == 16 && TARGET_P9_VECTOR
3033 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
3034 || (m2 == TImode && TARGET_VSX)))
3036 addr_mask |= RELOAD_REG_OFFSET;
3037 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
3038 addr_mask |= RELOAD_REG_QUAD_OFFSET;
3041 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
3042 addressing on 128-bit types. */
3043 if (rc == RELOAD_REG_VMX && msize == 16
3044 && (addr_mask & RELOAD_REG_VALID) != 0)
3045 addr_mask |= RELOAD_REG_AND_M16;
3047 reg_addr[m].addr_mask[rc] = addr_mask;
3048 any_addr_mask |= addr_mask;
3051 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
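/* Editorial example of the resulting masks (assuming a 64-bit VSX target):
   DImode in a GPR typically ends up with RELOAD_REG_VALID,
   RELOAD_REG_INDEXED, RELOAD_REG_OFFSET, RELOAD_REG_PRE_INCDEC, and
   RELOAD_REG_PRE_MODIFY, while V2DFmode in a VMX register gets
   RELOAD_REG_VALID, RELOAD_REG_INDEXED, and RELOAD_REG_AND_M16 but none
   of the update forms.  */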
3056 /* Initialize the various global tables that are based on register size. */
3057 static void
3058 rs6000_init_hard_regno_mode_ok (bool global_init_p)
3060 ssize_t r, m, c;
3061 int align64;
3062 int align32;
3064 /* Precalculate REGNO_REG_CLASS. */
3065 rs6000_regno_regclass[0] = GENERAL_REGS;
3066 for (r = 1; r < 32; ++r)
3067 rs6000_regno_regclass[r] = BASE_REGS;
3069 for (r = 32; r < 64; ++r)
3070 rs6000_regno_regclass[r] = FLOAT_REGS;
3072 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
3073 rs6000_regno_regclass[r] = NO_REGS;
3075 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3076 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3078 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3079 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3080 rs6000_regno_regclass[r] = CR_REGS;
3082 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3083 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3084 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3085 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3086 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3087 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3088 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3089 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3090 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3091 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3093 /* Precalculate register class to simpler reload register class. We don't
3094 need all of the register classes that are combinations of different
3095 classes, just the simple ones that have constraint letters. */
3096 for (c = 0; c < N_REG_CLASSES; c++)
3097 reg_class_to_reg_type[c] = NO_REG_TYPE;
3099 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3100 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3101 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3102 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3103 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3104 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3105 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3106 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3107 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3108 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3110 if (TARGET_VSX)
3112 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3113 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3115 else
3117 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3118 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3121   /* Precalculate the valid memory formats as well as the vector information;
3122      this must be set up before the rs6000_hard_regno_nregs_internal calls
3123 below. */
3124 gcc_assert ((int)VECTOR_NONE == 0);
3125 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3126   memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3128 gcc_assert ((int)CODE_FOR_nothing == 0);
3129 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3131 gcc_assert ((int)NO_REGS == 0);
3132 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3134   /* The VSX hardware allows native alignment for vectors; TARGET_VSX_ALIGN_128
3135      controls whether the compiler uses native alignment or 128-bit alignment.  */
3136 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3138 align64 = 64;
3139 align32 = 32;
3141 else
3143 align64 = 128;
3144 align32 = 128;
3147 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3148 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3149 if (TARGET_FLOAT128_TYPE)
3151 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3152 rs6000_vector_align[KFmode] = 128;
3154 if (FLOAT128_IEEE_P (TFmode))
3156 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3157 rs6000_vector_align[TFmode] = 128;
3161 /* V2DF mode, VSX only. */
3162 if (TARGET_VSX)
3164 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3165 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3166 rs6000_vector_align[V2DFmode] = align64;
3169 /* V4SF mode, either VSX or Altivec. */
3170 if (TARGET_VSX)
3172 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3173 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3174 rs6000_vector_align[V4SFmode] = align32;
3176 else if (TARGET_ALTIVEC)
3178 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3179 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3180 rs6000_vector_align[V4SFmode] = align32;
3183 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3184 and stores. */
3185 if (TARGET_ALTIVEC)
3187 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3188 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3189 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3190 rs6000_vector_align[V4SImode] = align32;
3191 rs6000_vector_align[V8HImode] = align32;
3192 rs6000_vector_align[V16QImode] = align32;
3194 if (TARGET_VSX)
3196 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3197 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3198 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3200 else
3202 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3203 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3204 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3208   /* V2DImode: full arithmetic depends on ISA 2.07 vector instructions.  Allow
3209      under VSX to do insert/splat/extract; Altivec lacks 64-bit integer support.  */
3210 if (TARGET_VSX)
3212 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3213 rs6000_vector_unit[V2DImode]
3214 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3215 rs6000_vector_align[V2DImode] = align64;
3217 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3218 rs6000_vector_unit[V1TImode]
3219 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3220 rs6000_vector_align[V1TImode] = 128;
3223 /* DFmode, see if we want to use the VSX unit. Memory is handled
3224 differently, so don't set rs6000_vector_mem. */
3225 if (TARGET_VSX)
3227 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3228 rs6000_vector_align[DFmode] = 64;
3231 /* SFmode, see if we want to use the VSX unit. */
3232 if (TARGET_P8_VECTOR)
3234 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3235 rs6000_vector_align[SFmode] = 32;
3238 /* Allow TImode in VSX register and set the VSX memory macros. */
3239 if (TARGET_VSX)
3241 rs6000_vector_mem[TImode] = VECTOR_VSX;
3242 rs6000_vector_align[TImode] = align64;
3245 /* TODO add paired floating point vector support. */
3247 /* Register class constraints for the constraints that depend on compile
3248 switches. When the VSX code was added, different constraints were added
3249 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3250 of the VSX registers are used. The register classes for scalar floating
3251      point types are set based on whether we allow that type into the upper
3252 (Altivec) registers. GCC has register classes to target the Altivec
3253 registers for load/store operations, to select using a VSX memory
3254 operation instead of the traditional floating point operation. The
3255 constraints are:
3257 d - Register class to use with traditional DFmode instructions.
3258 f - Register class to use with traditional SFmode instructions.
3259 v - Altivec register.
3260 wa - Any VSX register.
3261 wc - Reserved to represent individual CR bits (used in LLVM).
3262 wd - Preferred register class for V2DFmode.
3263 wf - Preferred register class for V4SFmode.
3264 wg - Float register for power6x move insns.
3265 wh - FP register for direct move instructions.
3266 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3267 wj - FP or VSX register to hold 64-bit integers for direct moves.
3268 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3269 wl - Float register if we can do 32-bit signed int loads.
3270 wm - VSX register for ISA 2.07 direct move operations.
3271 wn - always NO_REGS.
3272 wr - GPR if 64-bit mode is permitted.
3273 ws - Register class to do ISA 2.06 DF operations.
3274 wt - VSX register for TImode in VSX registers.
3275 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3276 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3277 ww - Register class to do SF conversions in with VSX operations.
3278 wx - Float register if we can do 32-bit int stores.
3279 wy - Register class to do ISA 2.07 SF operations.
3280 wz - Float register if we can do 32-bit unsigned int loads.
3281 wH - Altivec register if SImode is allowed in VSX registers.
3282 wI - VSX register if SImode is allowed in VSX registers.
3283 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3284 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
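/* Editorial illustration (hypothetical pattern, not from rs6000.md): an
   insn that accepts any VSX register would use the "wa" constraint:

     (define_insn "*mov_sketch"
       [(set (match_operand:V2DF 0 "vsx_register_operand" "=wa")
             (match_operand:V2DF 1 "vsx_register_operand" "wa"))]
       "TARGET_VSX"
       "xxlor %x0,%x1,%x1")  */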
3286 if (TARGET_HARD_FLOAT)
3287 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3289 if (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
3290 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3292 if (TARGET_VSX)
3294 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3295 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3296 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3297 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3298 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3299 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3300 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3303 /* Add conditional constraints based on various options, to allow us to
3304 collapse multiple insn patterns. */
3305 if (TARGET_ALTIVEC)
3306 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3308 if (TARGET_MFPGPR) /* DFmode */
3309 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3311 if (TARGET_LFIWAX)
3312 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3314 if (TARGET_DIRECT_MOVE)
3316 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3317 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3318 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3319 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3320 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3321 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3324 if (TARGET_POWERPC64)
3326 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3327 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3330 if (TARGET_P8_VECTOR) /* SFmode */
3332 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3333 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3334 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3336 else if (TARGET_VSX)
3337 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3339 if (TARGET_STFIWX)
3340 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3342 if (TARGET_LFIWZX)
3343 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3345 if (TARGET_FLOAT128_TYPE)
3347 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3348 if (FLOAT128_IEEE_P (TFmode))
3349 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3352 if (TARGET_P9_VECTOR)
3354 /* Support for new D-form instructions. */
3355 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3357 /* Support for ISA 3.0 (power9) vectors. */
3358 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3361 /* Support for new direct moves (ISA 3.0 + 64bit). */
3362 if (TARGET_DIRECT_MOVE_128)
3363 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3365 /* Support small integers in VSX registers. */
3366 if (TARGET_P8_VECTOR)
3368 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3369 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3370 if (TARGET_P9_VECTOR)
3372 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3373 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3377 /* Set up the reload helper and direct move functions. */
3378 if (TARGET_VSX || TARGET_ALTIVEC)
3380 if (TARGET_64BIT)
3382 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3383 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3384 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3385 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3386 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3387 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3388 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3389 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3390 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3391 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3392 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3393 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3394 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3395 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3396 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3397 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3398 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3399 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3400 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3401 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3403 if (FLOAT128_VECTOR_P (KFmode))
3405 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3406 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3409 if (FLOAT128_VECTOR_P (TFmode))
3411 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3412 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3415 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3416 available. */
3417 if (TARGET_NO_SDMODE_STACK)
3419 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3420 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3423 if (TARGET_VSX)
3425 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3426 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3429 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3431 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3432 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3433 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3434 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3435 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3436 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3437 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3438 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3439 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3441 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3442 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3443 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3444 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3445 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3446 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3447 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3448 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3449 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3451 if (FLOAT128_VECTOR_P (KFmode))
3453 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3454 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3457 if (FLOAT128_VECTOR_P (TFmode))
3459 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3460 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3464 else
3466 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3467 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3468 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3469 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3470 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3471 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3472 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3473 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3474 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3475 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3476 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3477 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3478 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3479 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3480 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3481 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3482 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3483 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3484 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3485 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3487 if (FLOAT128_VECTOR_P (KFmode))
3489 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3490 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3493 if (FLOAT128_IEEE_P (TFmode))
3495 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3496 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3499 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3500 available. */
3501 if (TARGET_NO_SDMODE_STACK)
3503 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3504 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3507 if (TARGET_VSX)
3509 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3510 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3513 if (TARGET_DIRECT_MOVE)
3515 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3516 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3517 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3521 reg_addr[DFmode].scalar_in_vmx_p = true;
3522 reg_addr[DImode].scalar_in_vmx_p = true;
3524 if (TARGET_P8_VECTOR)
3526 reg_addr[SFmode].scalar_in_vmx_p = true;
3527 reg_addr[SImode].scalar_in_vmx_p = true;
3529 if (TARGET_P9_VECTOR)
3531 reg_addr[HImode].scalar_in_vmx_p = true;
3532 reg_addr[QImode].scalar_in_vmx_p = true;
3537 /* Setup the fusion operations. */
3538 if (TARGET_P8_FUSION)
3540 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3541 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3542 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3543 if (TARGET_64BIT)
3544 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
3547 if (TARGET_P9_FUSION)
3549 struct fuse_insns {
3550 enum machine_mode mode; /* mode of the fused type. */
3551 enum machine_mode pmode; /* pointer mode. */
3552 enum rs6000_reload_reg_type rtype; /* register type. */
3553 enum insn_code load; /* load insn. */
3554 enum insn_code store; /* store insn. */
3557 static const struct fuse_insns addis_insns[] = {
3558 { E_SFmode, E_DImode, RELOAD_REG_FPR,
3559 CODE_FOR_fusion_vsx_di_sf_load,
3560 CODE_FOR_fusion_vsx_di_sf_store },
3562 { E_SFmode, E_SImode, RELOAD_REG_FPR,
3563 CODE_FOR_fusion_vsx_si_sf_load,
3564 CODE_FOR_fusion_vsx_si_sf_store },
3566 { E_DFmode, E_DImode, RELOAD_REG_FPR,
3567 CODE_FOR_fusion_vsx_di_df_load,
3568 CODE_FOR_fusion_vsx_di_df_store },
3570 { E_DFmode, E_SImode, RELOAD_REG_FPR,
3571 CODE_FOR_fusion_vsx_si_df_load,
3572 CODE_FOR_fusion_vsx_si_df_store },
3574 { E_DImode, E_DImode, RELOAD_REG_FPR,
3575 CODE_FOR_fusion_vsx_di_di_load,
3576 CODE_FOR_fusion_vsx_di_di_store },
3578 { E_DImode, E_SImode, RELOAD_REG_FPR,
3579 CODE_FOR_fusion_vsx_si_di_load,
3580 CODE_FOR_fusion_vsx_si_di_store },
3582 { E_QImode, E_DImode, RELOAD_REG_GPR,
3583 CODE_FOR_fusion_gpr_di_qi_load,
3584 CODE_FOR_fusion_gpr_di_qi_store },
3586 { E_QImode, E_SImode, RELOAD_REG_GPR,
3587 CODE_FOR_fusion_gpr_si_qi_load,
3588 CODE_FOR_fusion_gpr_si_qi_store },
3590 { E_HImode, E_DImode, RELOAD_REG_GPR,
3591 CODE_FOR_fusion_gpr_di_hi_load,
3592 CODE_FOR_fusion_gpr_di_hi_store },
3594 { E_HImode, E_SImode, RELOAD_REG_GPR,
3595 CODE_FOR_fusion_gpr_si_hi_load,
3596 CODE_FOR_fusion_gpr_si_hi_store },
3598 { E_SImode, E_DImode, RELOAD_REG_GPR,
3599 CODE_FOR_fusion_gpr_di_si_load,
3600 CODE_FOR_fusion_gpr_di_si_store },
3602 { E_SImode, E_SImode, RELOAD_REG_GPR,
3603 CODE_FOR_fusion_gpr_si_si_load,
3604 CODE_FOR_fusion_gpr_si_si_store },
3606 { E_SFmode, E_DImode, RELOAD_REG_GPR,
3607 CODE_FOR_fusion_gpr_di_sf_load,
3608 CODE_FOR_fusion_gpr_di_sf_store },
3610 { E_SFmode, E_SImode, RELOAD_REG_GPR,
3611 CODE_FOR_fusion_gpr_si_sf_load,
3612 CODE_FOR_fusion_gpr_si_sf_store },
3614 { E_DImode, E_DImode, RELOAD_REG_GPR,
3615 CODE_FOR_fusion_gpr_di_di_load,
3616 CODE_FOR_fusion_gpr_di_di_store },
3618 { E_DFmode, E_DImode, RELOAD_REG_GPR,
3619 CODE_FOR_fusion_gpr_di_df_load,
3620 CODE_FOR_fusion_gpr_di_df_store },
3623 machine_mode cur_pmode = Pmode;
3624 size_t i;
3626 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3628 machine_mode xmode = addis_insns[i].mode;
3629 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3631 if (addis_insns[i].pmode != cur_pmode)
3632 continue;
3634 if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
3635 continue;
3637 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3638 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3640 if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
3642 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3643 = addis_insns[i].load;
3644 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3645 = addis_insns[i].store;
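/* Worked example of the loop above (illustrative): on a 64-bit target,
   cur_pmode is DImode, so the { E_SFmode, E_DImode, RELOAD_REG_FPR } row
   registers CODE_FOR_fusion_vsx_di_sf_load/_store as the addis-fused
   SFmode handlers for FPRs, while every E_SImode row is skipped by the
   pmode check.  */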
3650 /* Note which types support fusing a TOC setup plus a memory insn. We only
3651 do fused TOCs for medium/large code models. */
3652 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3653 && (TARGET_CMODEL != CMODEL_SMALL))
3655 reg_addr[QImode].fused_toc = true;
3656 reg_addr[HImode].fused_toc = true;
3657 reg_addr[SImode].fused_toc = true;
3658 reg_addr[DImode].fused_toc = true;
3659 if (TARGET_HARD_FLOAT)
3661 if (TARGET_SINGLE_FLOAT)
3662 reg_addr[SFmode].fused_toc = true;
3663 if (TARGET_DOUBLE_FLOAT)
3664 reg_addr[DFmode].fused_toc = true;
3668 /* Precalculate HARD_REGNO_NREGS. */
3669 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3670 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3671 rs6000_hard_regno_nregs[m][r]
3672 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3674 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3675 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3676 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3677 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3678 rs6000_hard_regno_mode_ok_p[m][r] = true;
3680 /* Precalculate CLASS_MAX_NREGS sizes. */
3681 for (c = 0; c < LIM_REG_CLASSES; ++c)
3683 int reg_size;
3685 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3686 reg_size = UNITS_PER_VSX_WORD;
3688 else if (c == ALTIVEC_REGS)
3689 reg_size = UNITS_PER_ALTIVEC_WORD;
3691 else if (c == FLOAT_REGS)
3692 reg_size = UNITS_PER_FP_WORD;
3694 else
3695 reg_size = UNITS_PER_WORD;
3697 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3699 machine_mode m2 = (machine_mode)m;
3700 int reg_size2 = reg_size;
3702 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3703 in VSX. */
3704 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3705 reg_size2 = UNITS_PER_FP_WORD;
3707 rs6000_class_max_nregs[m][c]
3708 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
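/* Worked example of the formula above (illustrative): for m2 == V4SImode,
   GET_MODE_SIZE (m2) is 16.  With c == FLOAT_REGS and UNITS_PER_FP_WORD
   == 8, that yields (16 + 8 - 1) / 8 == 2 registers, while a 16-byte VSX
   class needs only (16 + 16 - 1) / 16 == 1.  */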
3712 /* Calculate the modes for which to automatically generate code using the
3713 reciprocal divide and square root instructions. In the future, possibly
3714 automatically generate the instructions even if the user did not specify
3715 -mrecip. The older machines' double precision reciprocal sqrt estimate is
3716 not accurate enough. */
3717 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3718 if (TARGET_FRES)
3719 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3720 if (TARGET_FRE)
3721 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3722 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3723 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3724 if (VECTOR_UNIT_VSX_P (V2DFmode))
3725 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3727 if (TARGET_FRSQRTES)
3728 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3729 if (TARGET_FRSQRTE)
3730 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3731 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3732 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3733 if (VECTOR_UNIT_VSX_P (V2DFmode))
3734 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3736 if (rs6000_recip_control)
3738 if (!flag_finite_math_only)
3739 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math",
3740 "-ffast-math");
3741 if (flag_trapping_math)
3742 warning (0, "%qs requires %qs or %qs", "-mrecip",
3743 "-fno-trapping-math", "-ffast-math");
3744 if (!flag_reciprocal_math)
3745 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3746 "-ffast-math");
3747 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3749 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3750 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3751 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3753 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3754 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3755 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3757 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3758 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3759 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3761 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3762 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3763 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3765 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3766 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3767 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3769 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3770 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3771 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3773 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3774 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3775 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3777 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3778 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3779 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
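/* Usage sketch (assumed command line, not from this file): the AUTO_RE /
   AUTO_RSQRTE bits above are only set under flags such as

       gcc -O3 -mrecip -ffast-math foo.c

   because -ffast-math implies -ffinite-math-only, -fno-trapping-math and
   -freciprocal-math, which the checks above require.  */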
3783 /* Update the addr mask bits in reg_addr to help secondary reload and the
3784 legitimate address support figure out the appropriate addressing to
3785 use. */
3786 rs6000_setup_reg_addr_masks ();
3788 if (global_init_p || TARGET_DEBUG_TARGET)
3790 if (TARGET_DEBUG_REG)
3791 rs6000_debug_reg_global ();
3793 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3794 fprintf (stderr,
3795 "SImode variable mult cost = %d\n"
3796 "SImode constant mult cost = %d\n"
3797 "SImode short constant mult cost = %d\n"
3798 "DImode multiplication cost = %d\n"
3799 "SImode division cost = %d\n"
3800 "DImode division cost = %d\n"
3801 "Simple fp operation cost = %d\n"
3802 "DFmode multiplication cost = %d\n"
3803 "SFmode division cost = %d\n"
3804 "DFmode division cost = %d\n"
3805 "cache line size = %d\n"
3806 "l1 cache size = %d\n"
3807 "l2 cache size = %d\n"
3808 "simultaneous prefetches = %d\n"
3809 "\n",
3810 rs6000_cost->mulsi,
3811 rs6000_cost->mulsi_const,
3812 rs6000_cost->mulsi_const9,
3813 rs6000_cost->muldi,
3814 rs6000_cost->divsi,
3815 rs6000_cost->divdi,
3816 rs6000_cost->fp,
3817 rs6000_cost->dmul,
3818 rs6000_cost->sdiv,
3819 rs6000_cost->ddiv,
3820 rs6000_cost->cache_line_size,
3821 rs6000_cost->l1_cache_size,
3822 rs6000_cost->l2_cache_size,
3823 rs6000_cost->simultaneous_prefetches);
3827 #if TARGET_MACHO
3828 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3830 static void
3831 darwin_rs6000_override_options (void)
3833 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3834 off. */
3835 rs6000_altivec_abi = 1;
3836 TARGET_ALTIVEC_VRSAVE = 1;
3837 rs6000_current_abi = ABI_DARWIN;
3839 if (DEFAULT_ABI == ABI_DARWIN
3840 && TARGET_64BIT)
3841 darwin_one_byte_bool = 1;
3843 if (TARGET_64BIT && ! TARGET_POWERPC64)
3845 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3846 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3848 if (flag_mkernel)
3850 rs6000_default_long_calls = 1;
3851 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3854 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3855 Altivec. */
3856 if (!flag_mkernel && !flag_apple_kext
3857 && TARGET_64BIT
3858 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3859 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3861 /* Unless the user (not the configurer) has explicitly overridden
3862 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
3863 G4 unless targeting the kernel. */
3864 if (!flag_mkernel
3865 && !flag_apple_kext
3866 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3867 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3868 && ! global_options_set.x_rs6000_cpu_index)
3870 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3873 #endif
3875 /* If not otherwise specified by a target, make 'long double' equivalent to
3876 'double'. */
3878 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3879 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3880 #endif
3882 /* Return the builtin mask of the various options used that could affect which
3883 builtins were used. In the past we used target_flags, but we've run out of
3884 bits, and some options like PAIRED are no longer in target_flags. */
3886 HOST_WIDE_INT
3887 rs6000_builtin_mask_calculate (void)
3889 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3890 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3891 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3892 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3893 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3894 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3895 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3896 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3897 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3898 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3899 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3900 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3901 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3902 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3903 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3904 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3905 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3906 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3907 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3908 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3909 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3910 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
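/* Illustrative sketch of how this mask is consumed (local names are
   hypothetical): a builtin is available when every RS6000_BTM_* bit it
   requires is present, roughly:

       HOST_WIDE_INT avail = rs6000_builtin_mask_calculate ();
       if ((required_mask & avail) == required_mask)
         ... expand the builtin ...
 */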
3913 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3914 to clobber the XER[CA] bit because clobbering that bit without telling
3915 the compiler worked just fine with versions of GCC before GCC 5, and
3916 breaking a lot of older code in ways that are hard to track down is
3917 not such a great idea. */
3919 static rtx_insn *
3920 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3921 vec<const char *> &/*constraints*/,
3922 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3924 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3925 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3926 return NULL;
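/* Example of the problem this solves (illustrative user code, not part of
   GCC): an asm statement such as

       asm ("addic %0,%1,1" : "=r" (x) : "r" (y));

   overwrites XER[CA] through the carrying add without declaring it, so
   the hook above conservatively clobbers CA for every asm.  */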
3929 /* Override command line options.
3931 Combine build-specific configuration information with options
3932 specified on the command line to set various state variables which
3933 influence code generation, optimization, and expansion of built-in
3934 functions. Assure that command-line configuration preferences are
3935 compatible with each other and with the build configuration; issue
3936 warnings while adjusting configuration or error messages while
3937 rejecting configuration.
3939 Upon entry to this function:
3941 This function is called once at the beginning of
3942 compilation, and then again at the start and end of compiling
3943 each section of code that has a different configuration, as
3944 indicated, for example, by adding the
3946 __attribute__((__target__("cpu=power9")))
3948 qualifier to a function definition or, for example, by bracketing
3949 code between
3951 #pragma GCC target("altivec")
3955 #pragma GCC reset_options
3957 directives. Parameter global_init_p is true for the initial
3958 invocation, which initializes global variables, and false for all
3959 subsequent invocations.
3962 Various global state information is assumed to be valid. This
3963 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3964 default CPU specified at build configure time, TARGET_DEFAULT,
3965 representing the default set of option flags for the default
3966 target, and global_options_set.x_rs6000_isa_flags, representing
3967 which options were requested on the command line.
3969 Upon return from this function:
3971 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3972 was set by name on the command line. Additionally, if certain
3973 attributes are automatically enabled or disabled by this function
3974 in order to assure compatibility between options and
3975 configuration, the flags associated with those attributes are
3976 also set. By setting these "explicit bits", we avoid the risk
3977 that other code might accidentally overwrite these particular
3978 attributes with "default values".
3980 The various bits of rs6000_isa_flags are set to indicate the
3981 target options that have been selected for the most current
3982 compilation efforts. This has the effect of also turning on the
3983 associated TARGET_XXX values since these are macros which are
3984 generally defined to test the corresponding bit of the
3985 rs6000_isa_flags variable.
3987 The variable rs6000_builtin_mask is set to represent the target
3988 options for the most current compilation efforts, consistent with
3989 the current contents of rs6000_isa_flags. This variable controls
3990 expansion of built-in functions.
3992 Various other global variables and fields of global structures
3993 (over 50 in all) are initialized to reflect the desired options
3994 for the most current compilation efforts. */
3996 static bool
3997 rs6000_option_override_internal (bool global_init_p)
3999 bool ret = true;
4001 HOST_WIDE_INT set_masks;
4002 HOST_WIDE_INT ignore_masks;
4003 int cpu_index = -1;
4004 int tune_index;
4005 struct cl_target_option *main_target_opt
4006 = ((global_init_p || target_option_default_node == NULL)
4007 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
4009 /* Print defaults. */
4010 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
4011 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
4013 /* Remember the explicit arguments. */
4014 if (global_init_p)
4015 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
4017 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
4018 library functions, so warn about it. The flag may be useful for
4019 performance studies from time to time though, so don't disable it
4020 entirely. */
4021 if (global_options_set.x_rs6000_alignment_flags
4022 && rs6000_alignment_flags == MASK_ALIGN_POWER
4023 && DEFAULT_ABI == ABI_DARWIN
4024 && TARGET_64BIT)
4025 warning (0, "%qs is not supported for 64-bit Darwin;"
4026 " it is incompatible with the installed C and C++ libraries",
4027 "-malign-power");
4029 /* Numerous experiments show that IRA-based loop pressure
4030 calculation works better for RTL loop invariant motion on targets
4031 with enough (>= 32) registers. It is an expensive optimization,
4032 so it is enabled only when optimizing for peak performance. */
4033 if (optimize >= 3 && global_init_p
4034 && !global_options_set.x_flag_ira_loop_pressure)
4035 flag_ira_loop_pressure = 1;
4037 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
4038 for tracebacks to be complete, but not if any -fasynchronous-unwind-tables
4039 options were already specified. */
4040 if (flag_sanitize & SANITIZE_USER_ADDRESS
4041 && !global_options_set.x_flag_asynchronous_unwind_tables)
4042 flag_asynchronous_unwind_tables = 1;
4044 /* Set the pointer size. */
4045 if (TARGET_64BIT)
4047 rs6000_pmode = DImode;
4048 rs6000_pointer_size = 64;
4050 else
4052 rs6000_pmode = SImode;
4053 rs6000_pointer_size = 32;
4056 /* Some OSs don't support saving the high part of 64-bit registers on context
4057 switch. Other OSs don't support saving Altivec registers. On those OSs,
4058 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
4059 if the user wants either, the user must explicitly specify them and we
4060 won't interfere with the user's specification. */
4062 set_masks = POWERPC_MASKS;
4063 #ifdef OS_MISSING_POWERPC64
4064 if (OS_MISSING_POWERPC64)
4065 set_masks &= ~OPTION_MASK_POWERPC64;
4066 #endif
4067 #ifdef OS_MISSING_ALTIVEC
4068 if (OS_MISSING_ALTIVEC)
4069 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
4070 | OTHER_VSX_VECTOR_MASKS);
4071 #endif
4073 /* Don't let the processor default override flags given explicitly. */
4074 set_masks &= ~rs6000_isa_flags_explicit;
4076 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
4077 the cpu in a target attribute or pragma, but did not specify a tuning
4078 option, use the cpu for the tuning option rather than the option specified
4079 with -mtune on the command line. Process a '--with-cpu' configuration
4080 request as an implicit --cpu. */
4081 if (rs6000_cpu_index >= 0)
4082 cpu_index = rs6000_cpu_index;
4083 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
4084 cpu_index = main_target_opt->x_rs6000_cpu_index;
4085 else if (OPTION_TARGET_CPU_DEFAULT)
4086 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
4088 if (cpu_index >= 0)
4090 const char *unavailable_cpu = NULL;
4091 switch (processor_target_table[cpu_index].processor)
4093 #ifndef HAVE_AS_POWER9
4094 case PROCESSOR_POWER9:
4095 unavailable_cpu = "power9";
4096 break;
4097 #endif
4098 #ifndef HAVE_AS_POWER8
4099 case PROCESSOR_POWER8:
4100 unavailable_cpu = "power8";
4101 break;
4102 #endif
4103 #ifndef HAVE_AS_POPCNTD
4104 case PROCESSOR_POWER7:
4105 unavailable_cpu = "power7";
4106 break;
4107 #endif
4108 #ifndef HAVE_AS_DFP
4109 case PROCESSOR_POWER6:
4110 unavailable_cpu = "power6";
4111 break;
4112 #endif
4113 #ifndef HAVE_AS_POPCNTB
4114 case PROCESSOR_POWER5:
4115 unavailable_cpu = "power5";
4116 break;
4117 #endif
4118 default:
4119 break;
4121 if (unavailable_cpu)
4123 cpu_index = -1;
4124 warning (0, "will not generate %qs instructions because "
4125 "assembler lacks %qs support", unavailable_cpu,
4126 unavailable_cpu);
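/* For example (illustrative): a compiler whose configure-time assembler
   lacked HAVE_AS_POWER9 turns an explicit

       gcc -mcpu=power9 foo.c

   into the warning above, resets cpu_index to -1, and falls back to the
   TARGET_DEFAULT handling below.  */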
4130 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
4131 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4132 with those from the cpu, except for options that were explicitly set. If
4133 we don't have a cpu, do not override the target bits set in
4134 TARGET_DEFAULT. */
4135 if (cpu_index >= 0)
4137 rs6000_cpu_index = cpu_index;
4138 rs6000_isa_flags &= ~set_masks;
4139 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4140 & set_masks);
4142 else
4144 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4145 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4146 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Since we switched
4147 to using rs6000_isa_flags, we need to do the initialization here.
4149 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4150 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4151 HOST_WIDE_INT flags;
4152 if (TARGET_DEFAULT)
4153 flags = TARGET_DEFAULT;
4154 else
4156 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4157 const char *default_cpu = (!TARGET_POWERPC64
4158 ? "powerpc"
4159 : (BYTES_BIG_ENDIAN
4160 ? "powerpc64"
4161 : "powerpc64le"));
4162 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
4163 flags = processor_target_table[default_cpu_index].target_enable;
4165 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
4168 if (rs6000_tune_index >= 0)
4169 tune_index = rs6000_tune_index;
4170 else if (cpu_index >= 0)
4171 rs6000_tune_index = tune_index = cpu_index;
4172 else
4174 size_t i;
4175 enum processor_type tune_proc
4176 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4178 tune_index = -1;
4179 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4180 if (processor_target_table[i].processor == tune_proc)
4182 tune_index = i;
4183 break;
4187 gcc_assert (tune_index >= 0);
4188 rs6000_cpu = processor_target_table[tune_index].processor;
4190 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4191 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4192 || rs6000_cpu == PROCESSOR_PPCE5500)
4194 if (TARGET_ALTIVEC)
4195 error ("AltiVec not supported in this target");
4198 /* If we are optimizing big endian systems for space, use the load/store
4199 multiple and string instructions. */
4200 if (BYTES_BIG_ENDIAN && optimize_size)
4201 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
4202 | OPTION_MASK_STRING);
4204 /* Don't allow -mmultiple or -mstring on little endian systems
4205 unless the cpu is a 750, because the hardware doesn't support the
4206 instructions used in little endian mode, and they cause an alignment
4207 trap. The 750 does not cause an alignment trap (except when the
4208 target is unaligned). */
4210 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
4212 if (TARGET_MULTIPLE)
4214 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4215 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4216 warning (0, "%qs is not supported on little endian systems",
4217 "-mmultiple");
4220 if (TARGET_STRING)
4222 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4223 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
4224 warning (0, "%qs is not supported on little endian systems",
4225 "-mstring");
4229 /* If little-endian, default to -mstrict-align on older processors.
4230 Testing for htm matches power8 and later. */
4231 if (!BYTES_BIG_ENDIAN
4232 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4233 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4235 /* -maltivec={le,be} implies -maltivec. */
4236 if (rs6000_altivec_element_order != 0)
4237 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
4239 /* Disallow -maltivec=le in big endian mode for now. This is not
4240 known to be useful for anyone. */
4241 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
4243 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
4244 rs6000_altivec_element_order = 0;
4247 if (!rs6000_fold_gimple)
4248 fprintf (stderr,
4249 "gimple folding of rs6000 builtins has been disabled.\n");
4251 /* Add some warnings for VSX. */
4252 if (TARGET_VSX)
4254 const char *msg = NULL;
4255 if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
4257 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4258 msg = N_("-mvsx requires hardware floating point");
4259 else
4261 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4262 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4265 else if (TARGET_PAIRED_FLOAT)
4266 msg = N_("-mvsx and -mpaired are incompatible");
4267 else if (TARGET_AVOID_XFORM > 0)
4268 msg = N_("-mvsx needs indexed addressing");
4269 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4270 & OPTION_MASK_ALTIVEC))
4272 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4273 msg = N_("-mvsx and -mno-altivec are incompatible");
4274 else
4275 msg = N_("-mno-altivec disables vsx");
4278 if (msg)
4280 warning (0, msg);
4281 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4282 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4286 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4287 the -mcpu setting to enable options that conflict. */
4288 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4289 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4290 | OPTION_MASK_ALTIVEC
4291 | OPTION_MASK_VSX)) != 0)
4292 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4293 | OPTION_MASK_DIRECT_MOVE)
4294 & ~rs6000_isa_flags_explicit);
4296 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4297 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4299 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4300 off all of the options that depend on those flags. */
4301 ignore_masks = rs6000_disable_incompatible_switches ();
4303 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4304 unless the user explicitly used the -mno-<option> to disable the code. */
4305 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4306 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4307 else if (TARGET_P9_MINMAX)
4309 if (cpu_index >= 0)
4311 if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
4313 /* legacy behavior: allow -mcpu=power9 with certain
4314 capabilities explicitly disabled. */
4315 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4317 else
4318 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4319 "for <xxx> less than power9", "-mcpu");
4321 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4322 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4323 & rs6000_isa_flags_explicit))
4324 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4325 were explicitly cleared. */
4326 error ("%qs incompatible with explicitly disabled options",
4327 "-mpower9-minmax");
4328 else
4329 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4331 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4332 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4333 else if (TARGET_VSX)
4334 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4335 else if (TARGET_POPCNTD)
4336 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4337 else if (TARGET_DFP)
4338 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4339 else if (TARGET_CMPB)
4340 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4341 else if (TARGET_FPRND)
4342 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4343 else if (TARGET_POPCNTB)
4344 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4345 else if (TARGET_ALTIVEC)
4346 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4348 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4350 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4351 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4352 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4355 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4357 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4358 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4359 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4362 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4364 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4365 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4366 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4369 if (TARGET_P8_VECTOR && !TARGET_VSX)
4371 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4372 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4373 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4374 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4376 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4377 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4378 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4380 else
4382 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4383 not explicit. */
4384 rs6000_isa_flags |= OPTION_MASK_VSX;
4385 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4389 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4391 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4392 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4393 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4396 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4397 silently turn off quad memory mode. */
4398 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4400 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4401 warning (0, N_("-mquad-memory requires 64-bit mode"));
4403 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4404 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4406 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4407 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4410 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4411 the words are reversed, but atomic operations can still be done by
4412 swapping the words. */
4413 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4415 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4416 warning (0, N_("-mquad-memory is not available in little endian "
4417 "mode"));
4419 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4422 /* Assume that if the user asked for normal quad memory instructions, they
4423 want the atomic versions as well, unless they explicitly told us not to use
4424 quad word atomic instructions. */
4425 if (TARGET_QUAD_MEMORY
4426 && !TARGET_QUAD_MEMORY_ATOMIC
4427 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4428 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4430 /* If we can shrink-wrap the TOC register save separately, then use
4431 -msave-toc-indirect unless explicitly disabled. */
4432 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4433 && flag_shrink_wrap_separate
4434 && optimize_function_for_speed_p (cfun))
4435 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4437 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4438 generating power8 instructions. */
4439 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4440 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4441 & OPTION_MASK_P8_FUSION);
4443 /* Setting additional fusion flags turns on base fusion. */
4444 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4446 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4448 if (TARGET_P8_FUSION_SIGN)
4449 error ("%qs requires %qs", "-mpower8-fusion-sign",
4450 "-mpower8-fusion");
4452 if (TARGET_TOC_FUSION)
4453 error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");
4455 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4457 else
4458 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4461 /* Power9 fusion is a superset of power8 fusion. */
4462 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4464 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4466 /* We prefer to not mention undocumented options in
4467 error messages. However, if users have managed to select
4468 power9-fusion without selecting power8-fusion, they
4469 already know about undocumented flags. */
4470 error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
4471 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4473 else
4474 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4477 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4478 generating power9 instructions. */
4479 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4480 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4481 & OPTION_MASK_P9_FUSION);
4483 /* Power8 does not fuse sign-extended loads with the addis. If we are
4484 optimizing at high levels for speed, convert a sign-extended load into a
4485 zero-extending load plus an explicit sign extension. */
4486 if (TARGET_P8_FUSION
4487 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4488 && optimize_function_for_speed_p (cfun)
4489 && optimize >= 3)
4490 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4492 /* TOC fusion requires 64-bit and medium/large code model. */
4493 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4495 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4496 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4497 warning (0, N_("-mtoc-fusion requires 64-bit"));
4500 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4502 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4503 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4504 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4507 /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
4508 model. */
4509 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4510 && (TARGET_CMODEL != CMODEL_SMALL)
4511 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4512 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4514 /* ISA 3.0 vector instructions include ISA 2.07. */
4515 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4517 /* We prefer to not mention undocumented options in
4518 error messages. However, if users have managed to select
4519 power9-vector without selecting power8-vector, they
4520 already know about undocumented flags. */
4521 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) &&
4522 (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4523 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4524 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4526 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4527 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4528 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4530 else
4532 /* OPTION_MASK_P9_VECTOR is explicit and
4533 OPTION_MASK_P8_VECTOR is not explicit. */
4534 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4535 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4539 /* Set -mallow-movmisalign explicitly on if we have full ISA 2.07
4540 support. If we only have ISA 2.06 support, and the user did not specify
4541 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4542 but we don't enable the full vectorization support. */
4543 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4544 TARGET_ALLOW_MOVMISALIGN = 1;
4546 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4548 if (TARGET_ALLOW_MOVMISALIGN > 0
4549 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4550 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4552 TARGET_ALLOW_MOVMISALIGN = 0;
4555 /* Determine when unaligned vector accesses are permitted, and when
4556 they are preferred over masked Altivec loads. Note that if
4557 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4558 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4559 not true. */
4560 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4562 if (!TARGET_VSX)
4564 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4565 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4567 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4570 else if (!TARGET_ALLOW_MOVMISALIGN)
4572 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4573 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4574 "-mallow-movmisalign");
4576 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4580 /* Set long double size before the IEEE 128-bit tests. */
4581 if (!global_options_set.x_rs6000_long_double_type_size)
4583 if (main_target_opt != NULL
4584 && (main_target_opt->x_rs6000_long_double_type_size
4585 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4586 error ("target attribute or pragma changes long double size");
4587 else
4588 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4591 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4592 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4593 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4594 those systems will not pick up this default. Warn if the user changes the
4595 default unless -Wno-psabi. */
4596 if (!global_options_set.x_rs6000_ieeequad)
4597 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4599 else if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4601 static bool warned_change_long_double;
4602 if (!warned_change_long_double)
4604 warned_change_long_double = true;
4605 if (TARGET_IEEEQUAD)
4606 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4607 else
4608 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4612 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4613 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4614 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4615 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4616 the keyword and the type. */
4617 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4619 /* IEEE 128-bit floating point requires VSX support. */
4620 if (TARGET_FLOAT128_KEYWORD)
4622 if (!TARGET_VSX)
4624 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4625 error ("%qs requires VSX support", "-mfloat128");
4627 TARGET_FLOAT128_TYPE = 0;
4628 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4629 | OPTION_MASK_FLOAT128_HW);
4631 else if (!TARGET_FLOAT128_TYPE)
4633 TARGET_FLOAT128_TYPE = 1;
4634 warning (0, "The -mfloat128 option may not be fully supported");
4638 /* Enable the __float128 keyword under Linux by default. */
4639 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4640 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4641 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4643 /* If we are supporting the float128 type and have full ISA 3.0 support,
4644 enable -mfloat128-hardware by default. However, don't enable the
4645 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4646 because sometimes the compiler wants to put things in an integer
4647 container, and if we don't have __int128 support, it is impossible. */
4648 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4649 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4650 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4651 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4653 if (TARGET_FLOAT128_HW
4654 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4656 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4657 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4659 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4662 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4664 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4665 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4667 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
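/* Usage sketch (illustrative): after this block,

       gcc -mcpu=power9 -m64 t.c    with    __float128 x = 1.0q;

   gets hardware IEEE 128-bit instructions (-mfloat128-hardware is
   implied above), while an ISA 2.06/2.07 VSX target keeps the
   __float128 keyword but uses software emulation.  */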
4670 /* Print the options after updating the defaults. */
4671 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4672 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4674 /* E500mc does "better" if we inline more aggressively. Respect the
4675 user's opinion, though. */
4676 if (rs6000_block_move_inline_limit == 0
4677 && (rs6000_cpu == PROCESSOR_PPCE500MC
4678 || rs6000_cpu == PROCESSOR_PPCE500MC64
4679 || rs6000_cpu == PROCESSOR_PPCE5500
4680 || rs6000_cpu == PROCESSOR_PPCE6500))
4681 rs6000_block_move_inline_limit = 128;
4683 /* store_one_arg depends on expand_block_move to handle at least the
4684 size of reg_parm_stack_space. */
4685 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4686 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4688 if (global_init_p)
4690 /* If the appropriate debug option is enabled, replace the target hooks
4691 with debug versions that call the real version and then prints
4692 debugging information. */
4693 if (TARGET_DEBUG_COST)
4695 targetm.rtx_costs = rs6000_debug_rtx_costs;
4696 targetm.address_cost = rs6000_debug_address_cost;
4697 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4700 if (TARGET_DEBUG_ADDR)
4702 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4703 targetm.legitimize_address = rs6000_debug_legitimize_address;
4704 rs6000_secondary_reload_class_ptr
4705 = rs6000_debug_secondary_reload_class;
4706 targetm.secondary_memory_needed
4707 = rs6000_debug_secondary_memory_needed;
4708 targetm.can_change_mode_class
4709 = rs6000_debug_can_change_mode_class;
4710 rs6000_preferred_reload_class_ptr
4711 = rs6000_debug_preferred_reload_class;
4712 rs6000_legitimize_reload_address_ptr
4713 = rs6000_debug_legitimize_reload_address;
4714 rs6000_mode_dependent_address_ptr
4715 = rs6000_debug_mode_dependent_address;
4718 if (rs6000_veclibabi_name)
4720 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4721 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4722 else
4724 error ("unknown vectorization library ABI type (%qs) for "
4725 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4726 ret = false;
4731 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4732 target attribute or pragma which automatically enables both options,
4733 unless the altivec ABI was set. This is set by default for 64-bit, but
4734 not for 32-bit. */
4735 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4737 TARGET_FLOAT128_TYPE = 0;
4738 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4739 | OPTION_MASK_FLOAT128_KEYWORD)
4740 & ~rs6000_isa_flags_explicit);
4743 /* Enable Altivec ABI for AIX -maltivec. */
4744 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4746 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4747 error ("target attribute or pragma changes AltiVec ABI");
4748 else
4749 rs6000_altivec_abi = 1;
4752 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4753 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4754 be explicitly overridden in either case. */
4755 if (TARGET_ELF)
4757 if (!global_options_set.x_rs6000_altivec_abi
4758 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4760 if (main_target_opt != NULL &&
4761 !main_target_opt->x_rs6000_altivec_abi)
4762 error ("target attribute or pragma changes AltiVec ABI");
4763 else
4764 rs6000_altivec_abi = 1;
4768 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4769 So far, the only darwin64 targets are also MACH-O. */
4770 if (TARGET_MACHO
4771 && DEFAULT_ABI == ABI_DARWIN
4772 && TARGET_64BIT)
4774 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4775 error ("target attribute or pragma changes darwin64 ABI");
4776 else
4778 rs6000_darwin64_abi = 1;
4779 /* Default to natural alignment, for better performance. */
4780 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4784 /* Place FP constants in the constant pool instead of TOC
4785 if section anchors are enabled. */
4786 if (flag_section_anchors
4787 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4788 TARGET_NO_FP_IN_TOC = 1;
4790 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4791 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4793 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4794 SUBTARGET_OVERRIDE_OPTIONS;
4795 #endif
4796 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4797 SUBSUBTARGET_OVERRIDE_OPTIONS;
4798 #endif
4799 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4800 SUB3TARGET_OVERRIDE_OPTIONS;
4801 #endif
4803 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4804 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4806 /* For the E500 family of cores, reset the single/double FP flags to let us
4807 check that they remain constant across attributes or pragmas. Also,
4808 clear a possible request for string instructions, which are not supported
4809 and which we might have silently enabled above for -Os. */
4811 switch (rs6000_cpu)
4813 case PROCESSOR_PPC8540:
4814 case PROCESSOR_PPC8548:
4815 case PROCESSOR_PPCE500MC:
4816 case PROCESSOR_PPCE500MC64:
4817 case PROCESSOR_PPCE5500:
4818 case PROCESSOR_PPCE6500:
4819 rs6000_single_float = 0;
4820 rs6000_double_float = 0;
4821 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4822 break;
4824 default:
4825 break;
4828 if (main_target_opt)
4830 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4831 error ("target attribute or pragma changes single precision floating "
4832 "point");
4833 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4834 error ("target attribute or pragma changes double precision floating "
4835 "point");
4838 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
4839 && rs6000_cpu != PROCESSOR_POWER5
4840 && rs6000_cpu != PROCESSOR_POWER6
4841 && rs6000_cpu != PROCESSOR_POWER7
4842 && rs6000_cpu != PROCESSOR_POWER8
4843 && rs6000_cpu != PROCESSOR_POWER9
4844 && rs6000_cpu != PROCESSOR_PPCA2
4845 && rs6000_cpu != PROCESSOR_CELL
4846 && rs6000_cpu != PROCESSOR_PPC476);
4847 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
4848 || rs6000_cpu == PROCESSOR_POWER5
4849 || rs6000_cpu == PROCESSOR_POWER7
4850 || rs6000_cpu == PROCESSOR_POWER8);
4851 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
4852 || rs6000_cpu == PROCESSOR_POWER5
4853 || rs6000_cpu == PROCESSOR_POWER6
4854 || rs6000_cpu == PROCESSOR_POWER7
4855 || rs6000_cpu == PROCESSOR_POWER8
4856 || rs6000_cpu == PROCESSOR_POWER9
4857 || rs6000_cpu == PROCESSOR_PPCE500MC
4858 || rs6000_cpu == PROCESSOR_PPCE500MC64
4859 || rs6000_cpu == PROCESSOR_PPCE5500
4860 || rs6000_cpu == PROCESSOR_PPCE6500);
4862 /* Allow debug switches to override the above settings. These are set to -1
4863 in rs6000.opt to indicate the user hasn't directly set the switch. */
4864 if (TARGET_ALWAYS_HINT >= 0)
4865 rs6000_always_hint = TARGET_ALWAYS_HINT;
4867 if (TARGET_SCHED_GROUPS >= 0)
4868 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4870 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4871 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4873 rs6000_sched_restricted_insns_priority
4874 = (rs6000_sched_groups ? 1 : 0);
4876 /* Handle -msched-costly-dep option. */
4877 rs6000_sched_costly_dep
4878 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4880 if (rs6000_sched_costly_dep_str)
4882 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4883 rs6000_sched_costly_dep = no_dep_costly;
4884 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4885 rs6000_sched_costly_dep = all_deps_costly;
4886 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4887 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4888 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4889 rs6000_sched_costly_dep = store_to_load_dep_costly;
4890 else
4891 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4892 atoi (rs6000_sched_costly_dep_str));
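/* Example option forms accepted above (illustrative):
       -msched-costly-dep=true_store_to_load
       -msched-costly-dep=20        (any other string goes through atoi)  */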
4895 /* Handle -minsert-sched-nops option. */
4896 rs6000_sched_insert_nops
4897 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4899 if (rs6000_sched_insert_nops_str)
4901 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4902 rs6000_sched_insert_nops = sched_finish_none;
4903 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4904 rs6000_sched_insert_nops = sched_finish_pad_groups;
4905 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4906 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4907 else
4908 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4909 atoi (rs6000_sched_insert_nops_str));
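/* Example option forms accepted above (illustrative):
       -minsert-sched-nops=pad
       -minsert-sched-nops=regroup_exact
       -minsert-sched-nops=3        (any other string goes through atoi)  */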
4912 /* Handle the stack protector options. */
4913 if (!global_options_set.x_rs6000_stack_protector_guard)
4914 #ifdef TARGET_THREAD_SSP_OFFSET
4915 rs6000_stack_protector_guard = SSP_TLS;
4916 #else
4917 rs6000_stack_protector_guard = SSP_GLOBAL;
4918 #endif
4920 #ifdef TARGET_THREAD_SSP_OFFSET
4921 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4922 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4923 #endif
4925 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4927 char *endp;
4928 const char *str = rs6000_stack_protector_guard_offset_str;
4930 errno = 0;
4931 long offset = strtol (str, &endp, 0);
4932 if (!*str || *endp || errno)
4933 error ("%qs is not a valid number in %qs", str,
4934 "-mstack-protector-guard-offset=");
4936 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4937 || (TARGET_64BIT && (offset & 3)))
4938 error ("%qs is not a valid offset in %qs", str,
4939 "-mstack-protector-guard-offset=");
4941 rs6000_stack_protector_guard_offset = offset;
4944 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4946 const char *str = rs6000_stack_protector_guard_reg_str;
4947 int reg = decode_reg_name (str);
4949 if (!IN_RANGE (reg, 1, 31))
4950 error ("%qs is not a valid base register in %qs", str,
4951 "-mstack-protector-guard-reg=");
4953 rs6000_stack_protector_guard_reg = reg;
4956 if (rs6000_stack_protector_guard == SSP_TLS
4957 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4958 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
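/* Usage sketch (illustrative; the offset value is hypothetical):

       gcc -mstack-protector-guard=tls \
           -mstack-protector-guard-reg=r13 \
           -mstack-protector-guard-offset=0x28 foo.c

   The offset must fit in a signed 16-bit displacement, and on 64-bit
   targets must also be a multiple of 4, per the checks above.  */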
4960 if (global_init_p)
4962 #ifdef TARGET_REGNAMES
4963 /* If the user desires alternate register names, copy in the
4964 alternate names now. */
4965 if (TARGET_REGNAMES)
4966 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4967 #endif
4969 /* Set aix_struct_return last, after the ABI is determined.
4970 If -maix-struct-return or -msvr4-struct-return was explicitly
4971 used, don't override with the ABI default. */
4972 if (!global_options_set.x_aix_struct_return)
4973 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4975 #if 0
4976 /* IBM XL compiler defaults to unsigned bitfields. */
4977 if (TARGET_XL_COMPAT)
4978 flag_signed_bitfields = 0;
4979 #endif
4981 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4982 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4984 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4986 /* We can only guarantee the availability of DI pseudo-ops when
4987 assembling for 64-bit targets. */
4988 if (!TARGET_64BIT)
4990 targetm.asm_out.aligned_op.di = NULL;
4991 targetm.asm_out.unaligned_op.di = NULL;
4995 /* Set branch target alignment, if not optimizing for size. */
4996 if (!optimize_size)
4998 /* Cell wants to be aligned 8-byte for dual issue. Titan wants to be
4999 aligned 8-byte to avoid misprediction by the branch predictor. */
5000 if (rs6000_cpu == PROCESSOR_TITAN
5001 || rs6000_cpu == PROCESSOR_CELL)
5003 if (align_functions <= 0)
5004 align_functions = 8;
5005 if (align_jumps <= 0)
5006 align_jumps = 8;
5007 if (align_loops <= 0)
5008 align_loops = 8;
5010 if (rs6000_align_branch_targets)
5012 if (align_functions <= 0)
5013 align_functions = 16;
5014 if (align_jumps <= 0)
5015 align_jumps = 16;
5016 if (align_loops <= 0)
5018 can_override_loop_align = 1;
5019 align_loops = 16;
5022 if (align_jumps_max_skip <= 0)
5023 align_jumps_max_skip = 15;
5024 if (align_loops_max_skip <= 0)
5025 align_loops_max_skip = 15;
5028 /* Arrange to save and restore machine status around nested functions. */
5029 init_machine_status = rs6000_init_machine_status;
5031 /* We should always be splitting complex arguments, but we can't break
5032 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
5033 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
5034 targetm.calls.split_complex_arg = NULL;
5036 /* The AIX and ELFv1 ABIs define standard function descriptors. */
5037 if (DEFAULT_ABI == ABI_AIX)
5038 targetm.calls.custom_function_descriptors = 0;
5041 /* Initialize rs6000_cost with the appropriate target costs. */
5042 if (optimize_size)
5043 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
5044 else
5045 switch (rs6000_cpu)
5047 case PROCESSOR_RS64A:
5048 rs6000_cost = &rs64a_cost;
5049 break;
5051 case PROCESSOR_MPCCORE:
5052 rs6000_cost = &mpccore_cost;
5053 break;
5055 case PROCESSOR_PPC403:
5056 rs6000_cost = &ppc403_cost;
5057 break;
5059 case PROCESSOR_PPC405:
5060 rs6000_cost = &ppc405_cost;
5061 break;
5063 case PROCESSOR_PPC440:
5064 rs6000_cost = &ppc440_cost;
5065 break;
5067 case PROCESSOR_PPC476:
5068 rs6000_cost = &ppc476_cost;
5069 break;
5071 case PROCESSOR_PPC601:
5072 rs6000_cost = &ppc601_cost;
5073 break;
5075 case PROCESSOR_PPC603:
5076 rs6000_cost = &ppc603_cost;
5077 break;
5079 case PROCESSOR_PPC604:
5080 rs6000_cost = &ppc604_cost;
5081 break;
5083 case PROCESSOR_PPC604e:
5084 rs6000_cost = &ppc604e_cost;
5085 break;
5087 case PROCESSOR_PPC620:
5088 rs6000_cost = &ppc620_cost;
5089 break;
5091 case PROCESSOR_PPC630:
5092 rs6000_cost = &ppc630_cost;
5093 break;
5095 case PROCESSOR_CELL:
5096 rs6000_cost = &ppccell_cost;
5097 break;
5099 case PROCESSOR_PPC750:
5100 case PROCESSOR_PPC7400:
5101 rs6000_cost = &ppc750_cost;
5102 break;
5104 case PROCESSOR_PPC7450:
5105 rs6000_cost = &ppc7450_cost;
5106 break;
5108 case PROCESSOR_PPC8540:
5109 case PROCESSOR_PPC8548:
5110 rs6000_cost = &ppc8540_cost;
5111 break;
5113 case PROCESSOR_PPCE300C2:
5114 case PROCESSOR_PPCE300C3:
5115 rs6000_cost = &ppce300c2c3_cost;
5116 break;
5118 case PROCESSOR_PPCE500MC:
5119 rs6000_cost = &ppce500mc_cost;
5120 break;
5122 case PROCESSOR_PPCE500MC64:
5123 rs6000_cost = &ppce500mc64_cost;
5124 break;
5126 case PROCESSOR_PPCE5500:
5127 rs6000_cost = &ppce5500_cost;
5128 break;
5130 case PROCESSOR_PPCE6500:
5131 rs6000_cost = &ppce6500_cost;
5132 break;
5134 case PROCESSOR_TITAN:
5135 rs6000_cost = &titan_cost;
5136 break;
5138 case PROCESSOR_POWER4:
5139 case PROCESSOR_POWER5:
5140 rs6000_cost = &power4_cost;
5141 break;
5143 case PROCESSOR_POWER6:
5144 rs6000_cost = &power6_cost;
5145 break;
5147 case PROCESSOR_POWER7:
5148 rs6000_cost = &power7_cost;
5149 break;
5151 case PROCESSOR_POWER8:
5152 rs6000_cost = &power8_cost;
5153 break;
5155 case PROCESSOR_POWER9:
5156 rs6000_cost = &power9_cost;
5157 break;
5159 case PROCESSOR_PPCA2:
5160 rs6000_cost = &ppca2_cost;
5161 break;
5163 default:
5164 gcc_unreachable ();
5167 if (global_init_p)
5169 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5170 rs6000_cost->simultaneous_prefetches,
5171 global_options.x_param_values,
5172 global_options_set.x_param_values);
5173 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5174 global_options.x_param_values,
5175 global_options_set.x_param_values);
5176 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5177 rs6000_cost->cache_line_size,
5178 global_options.x_param_values,
5179 global_options_set.x_param_values);
5180 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5181 global_options.x_param_values,
5182 global_options_set.x_param_values);
5184 /* Increase loop peeling limits based on performance analysis. */
5185 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5186 global_options.x_param_values,
5187 global_options_set.x_param_values);
5188 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5189 global_options.x_param_values,
5190 global_options_set.x_param_values);
5192 /* Use the 'model' -fsched-pressure algorithm by default. */
5193 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5194 SCHED_PRESSURE_MODEL,
5195 global_options.x_param_values,
5196 global_options_set.x_param_values);
5198 /* If using typedef char *va_list, signal that
5199 __builtin_va_start (&ap, 0) can be optimized to
5200 ap = __builtin_next_arg (0). */
5201 if (DEFAULT_ABI != ABI_V4)
5202 targetm.expand_builtin_va_start = NULL;
5205 /* Set up single/double float flags.
5206 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
5207 then set both flags. */
5208 if (TARGET_HARD_FLOAT && rs6000_single_float == 0 && rs6000_double_float == 0)
5209 rs6000_single_float = rs6000_double_float = 1;
5211 /* If not explicitly specified via option, decide whether to generate indexed
5212 load/store instructions. A value of -1 indicates that the
5213 initial value of this variable has not been overwritten. During
5214 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
5215 if (TARGET_AVOID_XFORM == -1)
5216 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5217 DERAT mispredict penalty. However the LVE and STVE altivec instructions
5218 need indexed accesses and the type used is the scalar type of the element
5219 being loaded or stored. */
5220 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
5221 && !TARGET_ALTIVEC);
5223 /* Set the -mrecip options. */
5224 if (rs6000_recip_name)
5226 char *p = ASTRDUP (rs6000_recip_name);
5227 char *q;
5228 unsigned int mask, i;
5229 bool invert;
5231 while ((q = strtok (p, ",")) != NULL)
5233 p = NULL;
5234 if (*q == '!')
5236 invert = true;
5237 q++;
5239 else
5240 invert = false;
5242 if (!strcmp (q, "default"))
5243 mask = ((TARGET_RECIP_PRECISION)
5244 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5245 else
5247 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5248 if (!strcmp (q, recip_options[i].string))
5250 mask = recip_options[i].mask;
5251 break;
5254 if (i == ARRAY_SIZE (recip_options))
5256 error ("unknown option for %<%s=%s%>", "-mrecip", q);
5257 invert = false;
5258 mask = 0;
5259 ret = false;
5263 if (invert)
5264 rs6000_recip_control &= ~mask;
5265 else
5266 rs6000_recip_control |= mask;
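/* A minimal standalone sketch of the "!name,name" parsing pattern above,
   using hypothetical option names "foo" and "bar" in place of the
   recip_options[] table: strtok walks the comma-separated list, and a
   leading '!' clears the named mask bit instead of setting it.  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  unsigned control = 0x3;          /* both hypothetical bits start set */
  char buf[] = "!foo,bar";         /* strtok modifies its argument */
  char *p = buf, *q;

  while ((q = strtok (p, ",")) != NULL)
    {
      unsigned mask;
      int invert = 0;
      p = NULL;                    /* keep scanning the same string */
      if (*q == '!')
        {
          invert = 1;
          q++;
        }
      mask = !strcmp (q, "foo") ? 0x1 : !strcmp (q, "bar") ? 0x2 : 0;
      if (invert)
        control &= ~mask;
      else
        control |= mask;
    }
  printf ("0x%x\n", control);      /* prints 0x2: foo cleared, bar kept */
  return 0;
}
#endif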
5270 /* Set the builtin mask of the various options used that could affect which
5271 builtins were used. In the past we used target_flags, but we've run out
5272 of bits, and some options like PAIRED are no longer in target_flags. */
5273 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5274 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5275 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5276 rs6000_builtin_mask);
5278 /* Initialize all of the registers. */
5279 rs6000_init_hard_regno_mode_ok (global_init_p);
5281 /* Save the initial options in case the user uses function-specific options. */
5282 if (global_init_p)
5283 target_option_default_node = target_option_current_node
5284 = build_target_option_node (&global_options);
5286 /* If not explicitly specified via option, decide whether to generate the
5287 extra blr's required to preserve the link stack on some cpus (eg, 476). */
5288 if (TARGET_LINK_STACK == -1)
5289 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
5291 return ret;
5294 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5295 define the target cpu type. */
5297 static void
5298 rs6000_option_override (void)
5300 (void) rs6000_option_override_internal (true);
5304 /* Implement targetm.vectorize.builtin_mask_for_load. */
5305 static tree
5306 rs6000_builtin_mask_for_load (void)
5308 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5309 if ((TARGET_ALTIVEC && !TARGET_VSX)
5310 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5311 return altivec_builtin_mask_for_load;
5312 else
5313 return 0;
5316 /* Implement LOOP_ALIGN. */
5318 rs6000_loop_align (rtx label)
5320 basic_block bb;
5321 int ninsns;
5323 /* Don't override loop alignment if -falign-loops was specified. */
5324 if (!can_override_loop_align)
5325 return align_loops_log;
5327 bb = BLOCK_FOR_INSN (label);
5328 ninsns = num_loop_insns(bb->loop_father);
5330 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
5331 if (ninsns > 4 && ninsns <= 8
5332 && (rs6000_cpu == PROCESSOR_POWER4
5333 || rs6000_cpu == PROCESSOR_POWER5
5334 || rs6000_cpu == PROCESSOR_POWER6
5335 || rs6000_cpu == PROCESSOR_POWER7
5336 || rs6000_cpu == PROCESSOR_POWER8
5337 || rs6000_cpu == PROCESSOR_POWER9))
5338 return 5;
5339 else
5340 return align_loops_log;
5343 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5344 static int
5345 rs6000_loop_align_max_skip (rtx_insn *label)
5347 return (1 << rs6000_loop_align (label)) - 1;
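/* Worked example for the two alignment hooks above (assuming -falign-loops
   was not given): on a POWER8 loop of six insns, rs6000_loop_align returns
   5, i.e. a 2**5 = 32-byte alignment request, and
   rs6000_loop_align_max_skip returns (1 << 5) - 1 = 31, so at most 31
   padding bytes are spent to reach the 32-byte boundary.  */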
5350 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5351 after applying N iterations. This routine does not determine
5352 how many iterations are required to reach the desired alignment. */
5354 static bool
5355 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5357 if (is_packed)
5358 return false;
5360 if (TARGET_32BIT)
5362 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5363 return true;
5365 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5366 return true;
5368 return false;
5370 else
5372 if (TARGET_MACHO)
5373 return false;
5375 /* Assume that all other types are naturally aligned. CHECKME! */
5376 return true;
5380 /* Return true if the vector misalignment factor is supported by the
5381 target. */
5382 static bool
5383 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5384 const_tree type,
5385 int misalignment,
5386 bool is_packed)
5388 if (TARGET_VSX)
5390 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5391 return true;
5393 /* Return if movmisalign pattern is not supported for this mode. */
5394 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5395 return false;
5397 if (misalignment == -1)
5399 /* Misalignment factor is unknown at compile time but we know
5400 it's word aligned. */
5401 if (rs6000_vector_alignment_reachable (type, is_packed))
5403 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5405 if (element_size == 64 || element_size == 32)
5406 return true;
5409 return false;
5412 /* VSX supports word-aligned vectors. */
5413 if (misalignment % 4 == 0)
5414 return true;
5416 return false;
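/* Example of the checks above: with VSX but without efficient unaligned
   accesses, a vector access whose misalignment is known to be 8 bytes
   passes the misalignment % 4 == 0 test and is supported through the
   movmisalign pattern, while a 3-byte misalignment is rejected.  */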
5419 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5420 static int
5421 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5422 tree vectype, int misalign)
5424 unsigned elements;
5425 tree elem_type;
5427 switch (type_of_cost)
5429 case scalar_stmt:
5430 case scalar_load:
5431 case scalar_store:
5432 case vector_stmt:
5433 case vector_load:
5434 case vector_store:
5435 case vec_to_scalar:
5436 case scalar_to_vec:
5437 case cond_branch_not_taken:
5438 return 1;
5440 case vec_perm:
5441 if (TARGET_VSX)
5442 return 3;
5443 else
5444 return 1;
5446 case vec_promote_demote:
5447 if (TARGET_VSX)
5448 return 4;
5449 else
5450 return 1;
5452 case cond_branch_taken:
5453 return 3;
5455 case unaligned_load:
5456 case vector_gather_load:
5457 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5458 return 1;
5460 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5462 elements = TYPE_VECTOR_SUBPARTS (vectype);
5463 if (elements == 2)
5464 /* Double word aligned. */
5465 return 2;
5467 if (elements == 4)
5469 switch (misalign)
5471 case 8:
5472 /* Double word aligned. */
5473 return 2;
5475 case -1:
5476 /* Unknown misalignment. */
5477 case 4:
5478 case 12:
5479 /* Word aligned. */
5480 return 22;
5482 default:
5483 gcc_unreachable ();
5488 if (TARGET_ALTIVEC)
5489 /* Misaligned loads are not supported. */
5490 gcc_unreachable ();
5492 return 2;
5494 case unaligned_store:
5495 case vector_scatter_store:
5496 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5497 return 1;
5499 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5501 elements = TYPE_VECTOR_SUBPARTS (vectype);
5502 if (elements == 2)
5503 /* Double word aligned. */
5504 return 2;
5506 if (elements == 4)
5508 switch (misalign)
5510 case 8:
5511 /* Double word aligned. */
5512 return 2;
5514 case -1:
5515 /* Unknown misalignment. */
5516 case 4:
5517 case 12:
5518 /* Word aligned. */
5519 return 23;
5521 default:
5522 gcc_unreachable ();
5527 if (TARGET_ALTIVEC)
5528 /* Misaligned stores are not supported. */
5529 gcc_unreachable ();
5531 return 2;
5533 case vec_construct:
5534 /* This is a rough approximation assuming non-constant elements
5535 constructed into a vector via element insertion. FIXME:
5536 vec_construct is not granular enough for uniformly good
5537 decisions. If the initialization is a splat, this is
5538 cheaper than we estimate. Improve this someday. */
5539 elem_type = TREE_TYPE (vectype);
5540 /* 32-bit vectors loaded into registers are stored as double
5541 precision, so we need 2 permutes, 2 converts, and 1 merge
5542 to construct a vector of short floats from them. */
5543 if (SCALAR_FLOAT_TYPE_P (elem_type)
5544 && TYPE_PRECISION (elem_type) == 32)
5545 return 5;
5546 /* On POWER9, integer vector types are built up in GPRs and then
5547 use a direct move (2 cycles). For POWER8 this is even worse,
5548 as we need two direct moves and a merge, and the direct moves
5549 are five cycles. */
5550 else if (INTEGRAL_TYPE_P (elem_type))
5552 if (TARGET_P9_VECTOR)
5553 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5554 else
5555 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5557 else
5558 /* V2DFmode doesn't need a direct move. */
5559 return 2;
5561 default:
5562 gcc_unreachable ();
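/* Worked example for the vec_construct arm above: building a V4SI vector
   from four scalar integers costs TYPE_VECTOR_SUBPARTS - 1 + 2
   = 4 - 1 + 2 = 5 units with TARGET_P9_VECTOR, but 4 - 1 + 5 = 8 units on
   POWER8, reflecting the cheaper single direct move on POWER9.  */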
5566 /* Implement targetm.vectorize.preferred_simd_mode. */
5568 static machine_mode
5569 rs6000_preferred_simd_mode (scalar_mode mode)
5571 if (TARGET_VSX)
5572 switch (mode)
5574 case E_DFmode:
5575 return V2DFmode;
5576 default:;
5578 if (TARGET_ALTIVEC || TARGET_VSX)
5579 switch (mode)
5581 case E_SFmode:
5582 return V4SFmode;
5583 case E_TImode:
5584 return V1TImode;
5585 case E_DImode:
5586 return V2DImode;
5587 case E_SImode:
5588 return V4SImode;
5589 case E_HImode:
5590 return V8HImode;
5591 case E_QImode:
5592 return V16QImode;
5593 default:;
5595 if (TARGET_PAIRED_FLOAT
5596 && mode == SFmode)
5597 return V2SFmode;
5598 return word_mode;
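/* Example of the mode selection above: on a VSX target, DFmode data
   vectorizes in V2DFmode and SImode data in V4SImode; a target with
   neither AltiVec nor VSX (nor paired floats for SFmode) falls back to
   word_mode, which effectively disables vectorization for that type.  */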
5601 typedef struct _rs6000_cost_data
5603 struct loop *loop_info;
5604 unsigned cost[3];
5605 } rs6000_cost_data;
5607 /* Test for likely overcommitment of vector hardware resources. If a
5608 loop iteration is relatively large, and too large a percentage of
5609 instructions in the loop are vectorized, the cost model may not
5610 adequately reflect delays from unavailable vector resources.
5611 Penalize the loop body cost for this case. */
5613 static void
5614 rs6000_density_test (rs6000_cost_data *data)
5616 const int DENSITY_PCT_THRESHOLD = 85;
5617 const int DENSITY_SIZE_THRESHOLD = 70;
5618 const int DENSITY_PENALTY = 10;
5619 struct loop *loop = data->loop_info;
5620 basic_block *bbs = get_loop_body (loop);
5621 int nbbs = loop->num_nodes;
5622 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5623 int i, density_pct;
5625 for (i = 0; i < nbbs; i++)
5627 basic_block bb = bbs[i];
5628 gimple_stmt_iterator gsi;
5630 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5632 gimple *stmt = gsi_stmt (gsi);
5633 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5635 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5636 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5637 not_vec_cost++;
5641 free (bbs);
5642 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5644 if (density_pct > DENSITY_PCT_THRESHOLD
5645 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5647 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5648 if (dump_enabled_p ())
5649 dump_printf_loc (MSG_NOTE, vect_location,
5650 "density %d%%, cost %d exceeds threshold, penalizing "
5651 "loop body cost by %d%%", density_pct,
5652 vec_cost + not_vec_cost, DENSITY_PENALTY);
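/* Worked example: with vec_cost == 90 and not_vec_cost == 10, the density
   is 90 * 100 / (90 + 10) = 90% > 85% and the size 100 > 70, so both
   thresholds are exceeded and the body cost becomes
   90 * (100 + 10) / 100 = 99.  */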
5656 /* Implement targetm.vectorize.init_cost. */
5658 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5659 instruction is needed by the vectorization. */
5660 static bool rs6000_vect_nonmem;
5662 static void *
5663 rs6000_init_cost (struct loop *loop_info)
5665 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5666 data->loop_info = loop_info;
5667 data->cost[vect_prologue] = 0;
5668 data->cost[vect_body] = 0;
5669 data->cost[vect_epilogue] = 0;
5670 rs6000_vect_nonmem = false;
5671 return data;
5674 /* Implement targetm.vectorize.add_stmt_cost. */
5676 static unsigned
5677 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5678 struct _stmt_vec_info *stmt_info, int misalign,
5679 enum vect_cost_model_location where)
5681 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5682 unsigned retval = 0;
5684 if (flag_vect_cost_model)
5686 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5687 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5688 misalign);
5689 /* Statements in an inner loop relative to the loop being
5690 vectorized are weighted more heavily. The value here is
5691 arbitrary and could potentially be improved with analysis. */
5692 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5693 count *= 50; /* FIXME. */
5695 retval = (unsigned) (count * stmt_cost);
5696 cost_data->cost[where] += retval;
5698 /* Check whether we're doing something other than just a copy loop.
5699 Not all such loops may be profitably vectorized; see
5700 rs6000_finish_cost. */
5701 if ((kind == vec_to_scalar || kind == vec_perm
5702 || kind == vec_promote_demote || kind == vec_construct
5703 || kind == scalar_to_vec)
5704 || (where == vect_body && kind == vector_stmt))
5705 rs6000_vect_nonmem = true;
5708 return retval;
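/* Example of the inner-loop weighting above: a vector_stmt (base cost 1)
   appearing with count == 2 in a loop nested inside the loop being
   vectorized contributes 2 * 50 * 1 == 100 to the body cost rather
   than 2.  */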
5711 /* Implement targetm.vectorize.finish_cost. */
5713 static void
5714 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5715 unsigned *body_cost, unsigned *epilogue_cost)
5717 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5719 if (cost_data->loop_info)
5720 rs6000_density_test (cost_data);
5722 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5723 that require versioning for any reason. The vectorization is at
5724 best a wash inside the loop, and the versioning checks make
5725 profitability highly unlikely and potentially quite harmful. */
5726 if (cost_data->loop_info)
5728 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5729 if (!rs6000_vect_nonmem
5730 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5731 && LOOP_REQUIRES_VERSIONING (vec_info))
5732 cost_data->cost[vect_body] += 10000;
5735 *prologue_cost = cost_data->cost[vect_prologue];
5736 *body_cost = cost_data->cost[vect_body];
5737 *epilogue_cost = cost_data->cost[vect_epilogue];
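/* Example: a plain copy loop (rs6000_vect_nonmem still false) with a
   vectorization factor of 2 that requires an aliasing or alignment
   versioning check gets 10000 added to its body cost, which effectively
   vetoes vectorizing it.  */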
5740 /* Implement targetm.vectorize.destroy_cost_data. */
5742 static void
5743 rs6000_destroy_cost_data (void *data)
5745 free (data);
5748 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5749 library with vectorized intrinsics. */
5751 static tree
5752 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5753 tree type_in)
5755 char name[32];
5756 const char *suffix = NULL;
5757 tree fntype, new_fndecl, bdecl = NULL_TREE;
5758 int n_args = 1;
5759 const char *bname;
5760 machine_mode el_mode, in_mode;
5761 int n, in_n;
5763 /* Libmass is suitable for unsafe math only as it does not correctly support
5764 parts of IEEE with the required precision such as denormals. Only support
5765 it if we have VSX to use the simd d2 or f4 functions.
5766 XXX: Add variable length support. */
5767 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5768 return NULL_TREE;
5770 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5771 n = TYPE_VECTOR_SUBPARTS (type_out);
5772 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5773 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5774 if (el_mode != in_mode
5775 || n != in_n)
5776 return NULL_TREE;
5778 switch (fn)
5780 CASE_CFN_ATAN2:
5781 CASE_CFN_HYPOT:
5782 CASE_CFN_POW:
5783 n_args = 2;
5784 gcc_fallthrough ();
5786 CASE_CFN_ACOS:
5787 CASE_CFN_ACOSH:
5788 CASE_CFN_ASIN:
5789 CASE_CFN_ASINH:
5790 CASE_CFN_ATAN:
5791 CASE_CFN_ATANH:
5792 CASE_CFN_CBRT:
5793 CASE_CFN_COS:
5794 CASE_CFN_COSH:
5795 CASE_CFN_ERF:
5796 CASE_CFN_ERFC:
5797 CASE_CFN_EXP2:
5798 CASE_CFN_EXP:
5799 CASE_CFN_EXPM1:
5800 CASE_CFN_LGAMMA:
5801 CASE_CFN_LOG10:
5802 CASE_CFN_LOG1P:
5803 CASE_CFN_LOG2:
5804 CASE_CFN_LOG:
5805 CASE_CFN_SIN:
5806 CASE_CFN_SINH:
5807 CASE_CFN_SQRT:
5808 CASE_CFN_TAN:
5809 CASE_CFN_TANH:
5810 if (el_mode == DFmode && n == 2)
5812 bdecl = mathfn_built_in (double_type_node, fn);
5813 suffix = "d2"; /* pow -> powd2 */
5815 else if (el_mode == SFmode && n == 4)
5817 bdecl = mathfn_built_in (float_type_node, fn);
5818 suffix = "4"; /* powf -> powf4 */
5820 else
5821 return NULL_TREE;
5822 if (!bdecl)
5823 return NULL_TREE;
5824 break;
5826 default:
5827 return NULL_TREE;
5830 gcc_assert (suffix != NULL);
5831 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5832 if (!bname)
5833 return NULL_TREE;
5835 strcpy (name, bname + sizeof ("__builtin_") - 1);
5836 strcat (name, suffix);
5838 if (n_args == 1)
5839 fntype = build_function_type_list (type_out, type_in, NULL);
5840 else if (n_args == 2)
5841 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5842 else
5843 gcc_unreachable ();
5845 /* Build a function declaration for the vectorized function. */
5846 new_fndecl = build_decl (BUILTINS_LOCATION,
5847 FUNCTION_DECL, get_identifier (name), fntype);
5848 TREE_PUBLIC (new_fndecl) = 1;
5849 DECL_EXTERNAL (new_fndecl) = 1;
5850 DECL_IS_NOVOPS (new_fndecl) = 1;
5851 TREE_READONLY (new_fndecl) = 1;
5853 return new_fndecl;
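/* Example of the name construction above: for CFN_POW with V2DFmode in
   and out, bdecl names "__builtin_pow"; stripping the "__builtin_" prefix
   and appending "d2" yields a declaration for the MASS routine "powd2",
   while the V4SFmode variant maps powf to "powf4".  */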
5856 /* Returns a function decl for a vectorized version of the builtin function
5857 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5858 if it is not available. */
5860 static tree
5861 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5862 tree type_in)
5864 machine_mode in_mode, out_mode;
5865 int in_n, out_n;
5867 if (TARGET_DEBUG_BUILTIN)
5868 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5869 combined_fn_name (combined_fn (fn)),
5870 GET_MODE_NAME (TYPE_MODE (type_out)),
5871 GET_MODE_NAME (TYPE_MODE (type_in)));
5873 if (TREE_CODE (type_out) != VECTOR_TYPE
5874 || TREE_CODE (type_in) != VECTOR_TYPE)
5875 return NULL_TREE;
5877 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5878 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5879 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5880 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5882 switch (fn)
5884 CASE_CFN_COPYSIGN:
5885 if (VECTOR_UNIT_VSX_P (V2DFmode)
5886 && out_mode == DFmode && out_n == 2
5887 && in_mode == DFmode && in_n == 2)
5888 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5889 if (VECTOR_UNIT_VSX_P (V4SFmode)
5890 && out_mode == SFmode && out_n == 4
5891 && in_mode == SFmode && in_n == 4)
5892 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5893 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5894 && out_mode == SFmode && out_n == 4
5895 && in_mode == SFmode && in_n == 4)
5896 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5897 break;
5898 CASE_CFN_CEIL:
5899 if (VECTOR_UNIT_VSX_P (V2DFmode)
5900 && out_mode == DFmode && out_n == 2
5901 && in_mode == DFmode && in_n == 2)
5902 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5903 if (VECTOR_UNIT_VSX_P (V4SFmode)
5904 && out_mode == SFmode && out_n == 4
5905 && in_mode == SFmode && in_n == 4)
5906 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5907 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5908 && out_mode == SFmode && out_n == 4
5909 && in_mode == SFmode && in_n == 4)
5910 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5911 break;
5912 CASE_CFN_FLOOR:
5913 if (VECTOR_UNIT_VSX_P (V2DFmode)
5914 && out_mode == DFmode && out_n == 2
5915 && in_mode == DFmode && in_n == 2)
5916 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5917 if (VECTOR_UNIT_VSX_P (V4SFmode)
5918 && out_mode == SFmode && out_n == 4
5919 && in_mode == SFmode && in_n == 4)
5920 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5921 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5922 && out_mode == SFmode && out_n == 4
5923 && in_mode == SFmode && in_n == 4)
5924 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5925 break;
5926 CASE_CFN_FMA:
5927 if (VECTOR_UNIT_VSX_P (V2DFmode)
5928 && out_mode == DFmode && out_n == 2
5929 && in_mode == DFmode && in_n == 2)
5930 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5931 if (VECTOR_UNIT_VSX_P (V4SFmode)
5932 && out_mode == SFmode && out_n == 4
5933 && in_mode == SFmode && in_n == 4)
5934 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5935 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5936 && out_mode == SFmode && out_n == 4
5937 && in_mode == SFmode && in_n == 4)
5938 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5939 break;
5940 CASE_CFN_TRUNC:
5941 if (VECTOR_UNIT_VSX_P (V2DFmode)
5942 && out_mode == DFmode && out_n == 2
5943 && in_mode == DFmode && in_n == 2)
5944 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5945 if (VECTOR_UNIT_VSX_P (V4SFmode)
5946 && out_mode == SFmode && out_n == 4
5947 && in_mode == SFmode && in_n == 4)
5948 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5949 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5950 && out_mode == SFmode && out_n == 4
5951 && in_mode == SFmode && in_n == 4)
5952 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5953 break;
5954 CASE_CFN_NEARBYINT:
5955 if (VECTOR_UNIT_VSX_P (V2DFmode)
5956 && flag_unsafe_math_optimizations
5957 && out_mode == DFmode && out_n == 2
5958 && in_mode == DFmode && in_n == 2)
5959 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5960 if (VECTOR_UNIT_VSX_P (V4SFmode)
5961 && flag_unsafe_math_optimizations
5962 && out_mode == SFmode && out_n == 4
5963 && in_mode == SFmode && in_n == 4)
5964 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5965 break;
5966 CASE_CFN_RINT:
5967 if (VECTOR_UNIT_VSX_P (V2DFmode)
5968 && !flag_trapping_math
5969 && out_mode == DFmode && out_n == 2
5970 && in_mode == DFmode && in_n == 2)
5971 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5972 if (VECTOR_UNIT_VSX_P (V4SFmode)
5973 && !flag_trapping_math
5974 && out_mode == SFmode && out_n == 4
5975 && in_mode == SFmode && in_n == 4)
5976 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5977 break;
5978 default:
5979 break;
5982 /* Generate calls to libmass if appropriate. */
5983 if (rs6000_veclib_handler)
5984 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5986 return NULL_TREE;
5989 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5991 static tree
5992 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5993 tree type_in)
5995 machine_mode in_mode, out_mode;
5996 int in_n, out_n;
5998 if (TARGET_DEBUG_BUILTIN)
5999 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
6000 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
6001 GET_MODE_NAME (TYPE_MODE (type_out)),
6002 GET_MODE_NAME (TYPE_MODE (type_in)));
6004 if (TREE_CODE (type_out) != VECTOR_TYPE
6005 || TREE_CODE (type_in) != VECTOR_TYPE)
6006 return NULL_TREE;
6008 out_mode = TYPE_MODE (TREE_TYPE (type_out));
6009 out_n = TYPE_VECTOR_SUBPARTS (type_out);
6010 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6011 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6013 enum rs6000_builtins fn
6014 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
6015 switch (fn)
6017 case RS6000_BUILTIN_RSQRTF:
6018 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6019 && out_mode == SFmode && out_n == 4
6020 && in_mode == SFmode && in_n == 4)
6021 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
6022 break;
6023 case RS6000_BUILTIN_RSQRT:
6024 if (VECTOR_UNIT_VSX_P (V2DFmode)
6025 && out_mode == DFmode && out_n == 2
6026 && in_mode == DFmode && in_n == 2)
6027 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
6028 break;
6029 case RS6000_BUILTIN_RECIPF:
6030 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6031 && out_mode == SFmode && out_n == 4
6032 && in_mode == SFmode && in_n == 4)
6033 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
6034 break;
6035 case RS6000_BUILTIN_RECIP:
6036 if (VECTOR_UNIT_VSX_P (V2DFmode)
6037 && out_mode == DFmode && out_n == 2
6038 && in_mode == DFmode && in_n == 2)
6039 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
6040 break;
6041 default:
6042 break;
6044 return NULL_TREE;
6047 /* Default CPU string for rs6000*_file_start functions. */
6048 static const char *rs6000_default_cpu;
6050 /* Do anything needed at the start of the asm file. */
6052 static void
6053 rs6000_file_start (void)
6055 char buffer[80];
6056 const char *start = buffer;
6057 FILE *file = asm_out_file;
6059 rs6000_default_cpu = TARGET_CPU_DEFAULT;
6061 default_file_start ();
6063 if (flag_verbose_asm)
6065 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
6067 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6069 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6070 start = "";
6073 if (global_options_set.x_rs6000_cpu_index)
6075 fprintf (file, "%s -mcpu=%s", start,
6076 processor_target_table[rs6000_cpu_index].name);
6077 start = "";
6080 if (global_options_set.x_rs6000_tune_index)
6082 fprintf (file, "%s -mtune=%s", start,
6083 processor_target_table[rs6000_tune_index].name);
6084 start = "";
6087 if (PPC405_ERRATUM77)
6089 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6090 start = "";
6093 #ifdef USING_ELFOS_H
6094 switch (rs6000_sdata)
6096 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6097 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6098 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6099 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6102 if (rs6000_sdata && g_switch_value)
6104 fprintf (file, "%s -G %d", start,
6105 g_switch_value);
6106 start = "";
6108 #endif
6110 if (*start == '\0')
6111 putc ('\n', file);
6114 #ifdef USING_ELFOS_H
6115 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6116 && !global_options_set.x_rs6000_cpu_index)
6118 fputs ("\t.machine ", asm_out_file);
6119 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6120 fputs ("power9\n", asm_out_file);
6121 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6122 fputs ("power8\n", asm_out_file);
6123 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6124 fputs ("power7\n", asm_out_file);
6125 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6126 fputs ("power6\n", asm_out_file);
6127 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6128 fputs ("power5\n", asm_out_file);
6129 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6130 fputs ("power4\n", asm_out_file);
6131 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6132 fputs ("ppc64\n", asm_out_file);
6133 else
6134 fputs ("ppc\n", asm_out_file);
6136 #endif
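/* Example of the cascade above: when neither --with-cpu nor -mcpu= names
   a machine, the ISA flags decide the directive; OPTION_MASK_MODULO (new
   in ISA 3.0) selects ".machine power9", while a generic 64-bit compile
   with none of the listed masks falls through to ".machine ppc64".  */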
6138 if (DEFAULT_ABI == ABI_ELFv2)
6139 fprintf (file, "\t.abiversion 2\n");
6143 /* Return nonzero if this function is known to have a null epilogue. */
6146 direct_return (void)
6148 if (reload_completed)
6150 rs6000_stack_t *info = rs6000_stack_info ();
6152 if (info->first_gp_reg_save == 32
6153 && info->first_fp_reg_save == 64
6154 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6155 && ! info->lr_save_p
6156 && ! info->cr_save_p
6157 && info->vrsave_size == 0
6158 && ! info->push_p)
6159 return 1;
6162 return 0;
6165 /* Return the number of instructions it takes to form a constant in an
6166 integer register. */
6169 num_insns_constant_wide (HOST_WIDE_INT value)
6171 /* signed constant loadable with addi */
6172 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6173 return 1;
6175 /* constant loadable with addis */
6176 else if ((value & 0xffff) == 0
6177 && (value >> 31 == -1 || value >> 31 == 0))
6178 return 1;
6180 else if (TARGET_POWERPC64)
6182 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6183 HOST_WIDE_INT high = value >> 31;
6185 if (high == 0 || high == -1)
6186 return 2;
6188 high >>= 1;
6190 if (low == 0)
6191 return num_insns_constant_wide (high) + 1;
6192 else if (high == 0)
6193 return num_insns_constant_wide (low) + 1;
6194 else
6195 return (num_insns_constant_wide (high)
6196 + num_insns_constant_wide (low) + 1);
6199 else
6200 return 2;
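/* Worked examples for the routine above: 0x7fff loads with one addi and
   0x12340000 with one addis; a full 64-bit constant such as
   0x1234567890abcdef costs 2 (high half) + 2 (low half) + 1 (combining
   shift) = 5 insns, matching a lis/ori/sldi/oris/ori sequence.  */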
6204 num_insns_constant (rtx op, machine_mode mode)
6206 HOST_WIDE_INT low, high;
6208 switch (GET_CODE (op))
6210 case CONST_INT:
6211 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6212 && rs6000_is_valid_and_mask (op, mode))
6213 return 2;
6214 else
6215 return num_insns_constant_wide (INTVAL (op));
6217 case CONST_WIDE_INT:
6219 int i;
6220 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6221 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6222 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6223 return ins;
6226 case CONST_DOUBLE:
6227 if (mode == SFmode || mode == SDmode)
6229 long l;
6231 if (DECIMAL_FLOAT_MODE_P (mode))
6232 REAL_VALUE_TO_TARGET_DECIMAL32
6233 (*CONST_DOUBLE_REAL_VALUE (op), l);
6234 else
6235 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6236 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6239 long l[2];
6240 if (DECIMAL_FLOAT_MODE_P (mode))
6241 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6242 else
6243 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6244 high = l[WORDS_BIG_ENDIAN == 0];
6245 low = l[WORDS_BIG_ENDIAN != 0];
6247 if (TARGET_32BIT)
6248 return (num_insns_constant_wide (low)
6249 + num_insns_constant_wide (high));
6250 else
6252 if ((high == 0 && low >= 0)
6253 || (high == -1 && low < 0))
6254 return num_insns_constant_wide (low);
6256 else if (rs6000_is_valid_and_mask (op, mode))
6257 return 2;
6259 else if (low == 0)
6260 return num_insns_constant_wide (high) + 1;
6262 else
6263 return (num_insns_constant_wide (high)
6264 + num_insns_constant_wide (low) + 1);
6267 default:
6268 gcc_unreachable ();
6272 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6273 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6274 corresponding element of the vector, but for V4SFmode and V2SFmode,
6275 the corresponding "float" is interpreted as an SImode integer. */
6277 HOST_WIDE_INT
6278 const_vector_elt_as_int (rtx op, unsigned int elt)
6280 rtx tmp;
6282 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6283 gcc_assert (GET_MODE (op) != V2DImode
6284 && GET_MODE (op) != V2DFmode);
6286 tmp = CONST_VECTOR_ELT (op, elt);
6287 if (GET_MODE (op) == V4SFmode
6288 || GET_MODE (op) == V2SFmode)
6289 tmp = gen_lowpart (SImode, tmp);
6290 return INTVAL (tmp);
6293 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6294 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6295 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6296 all items are set to the same value and contain COPIES replicas of the
6297 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6298 operand and the others are set to the value of the operand's msb. */
6300 static bool
6301 vspltis_constant (rtx op, unsigned step, unsigned copies)
6303 machine_mode mode = GET_MODE (op);
6304 machine_mode inner = GET_MODE_INNER (mode);
6306 unsigned i;
6307 unsigned nunits;
6308 unsigned bitsize;
6309 unsigned mask;
6311 HOST_WIDE_INT val;
6312 HOST_WIDE_INT splat_val;
6313 HOST_WIDE_INT msb_val;
6315 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6316 return false;
6318 nunits = GET_MODE_NUNITS (mode);
6319 bitsize = GET_MODE_BITSIZE (inner);
6320 mask = GET_MODE_MASK (inner);
6322 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6323 splat_val = val;
6324 msb_val = val >= 0 ? 0 : -1;
6326 /* Construct the value to be splatted, if possible. If not, return 0. */
6327 for (i = 2; i <= copies; i *= 2)
6329 HOST_WIDE_INT small_val;
6330 bitsize /= 2;
6331 small_val = splat_val >> bitsize;
6332 mask >>= bitsize;
6333 if (splat_val != ((HOST_WIDE_INT)
6334 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6335 | (small_val & mask)))
6336 return false;
6337 splat_val = small_val;
6340 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6341 if (EASY_VECTOR_15 (splat_val))
6344 /* Also check if we can splat, and then add the result to itself. Do so if
6345 the value is positive, or if the splat instruction is using OP's mode;
6346 for splat_val < 0, the splat and the add should use the same mode. */
6347 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6348 && (splat_val >= 0 || (step == 1 && copies == 1)))
6351 /* Also check if we are loading up the most significant bit which can be
6352 done by loading up -1 and shifting the value left by -1. */
6353 else if (EASY_VECTOR_MSB (splat_val, inner))
6356 else
6357 return false;
6359 /* Check if VAL is present in every STEP-th element, and the
6360 other elements are filled with its most significant bit. */
6361 for (i = 1; i < nunits; ++i)
6363 HOST_WIDE_INT desired_val;
6364 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6365 if ((i & (step - 1)) == 0)
6366 desired_val = val;
6367 else
6368 desired_val = msb_val;
6370 if (desired_val != const_vector_elt_as_int (op, elt))
6371 return false;
6374 return true;
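/* Worked example: the V4SImode constant { 0x50005, 0x50005, 0x50005,
   0x50005 } fails the vspltisw test because 0x50005 is not a 5-bit
   immediate, but with COPIES == 2 both halfwords agree, SPLAT_VAL reduces
   to 5, and the constant is matched as vspltish 5.  */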
6377 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6378 instruction, filling in the bottom elements with 0 or -1.
6380 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6381 for the number of zeroes to shift in, or negative for the number of 0xff
6382 bytes to shift in.
6384 OP is a CONST_VECTOR. */
6387 vspltis_shifted (rtx op)
6389 machine_mode mode = GET_MODE (op);
6390 machine_mode inner = GET_MODE_INNER (mode);
6392 unsigned i, j;
6393 unsigned nunits;
6394 unsigned mask;
6396 HOST_WIDE_INT val;
6398 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6399 return false;
6401 /* We need to create pseudo registers to do the shift, so don't recognize
6402 shift vector constants after reload. */
6403 if (!can_create_pseudo_p ())
6404 return false;
6406 nunits = GET_MODE_NUNITS (mode);
6407 mask = GET_MODE_MASK (inner);
6409 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6411 /* Check if the value can really be the operand of a vspltis[bhw]. */
6412 if (EASY_VECTOR_15 (val))
6415 /* Also check if we are loading up the most significant bit which can be done
6416 by loading up -1 and shifting the value left by -1. */
6417 else if (EASY_VECTOR_MSB (val, inner))
6420 else
6421 return 0;
6423 /* Check if VAL is present in every STEP-th element until we find elements
6424 that are 0 or all 1 bits. */
6425 for (i = 1; i < nunits; ++i)
6427 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6428 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6430 /* If the value isn't the splat value, check for the remaining elements
6431 being 0/-1. */
6432 if (val != elt_val)
6434 if (elt_val == 0)
6436 for (j = i+1; j < nunits; ++j)
6438 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6439 if (const_vector_elt_as_int (op, elt2) != 0)
6440 return 0;
6443 return (nunits - i) * GET_MODE_SIZE (inner);
6446 else if ((elt_val & mask) == mask)
6448 for (j = i+1; j < nunits; ++j)
6450 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6451 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6452 return 0;
6455 return -((nunits - i) * GET_MODE_SIZE (inner));
6458 else
6459 return 0;
6463 /* If all elements are equal, we don't need to do VSLDOI. */
6464 return 0;
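/* Worked example: for the big-endian V4SImode constant { 5, 5, 5, 0 },
   the splat value 5 covers the first three elements and the trailing
   element is zero, so the routine returns (nunits - i) * inner size
   = (4 - 3) * 4 = 4, meaning vspltisw 5 followed by a vsldoi shifting in
   four zero bytes.  */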
6468 /* Return true if OP is of the given MODE and can be synthesized
6469 with a vspltisb, vspltish or vspltisw. */
6471 bool
6472 easy_altivec_constant (rtx op, machine_mode mode)
6474 unsigned step, copies;
6476 if (mode == VOIDmode)
6477 mode = GET_MODE (op);
6478 else if (mode != GET_MODE (op))
6479 return false;
6481 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6482 constants. */
6483 if (mode == V2DFmode)
6484 return zero_constant (op, mode);
6486 else if (mode == V2DImode)
6488 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6489 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6490 return false;
6492 if (zero_constant (op, mode))
6493 return true;
6495 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6496 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6497 return true;
6499 return false;
6502 /* V1TImode is a special container for TImode. Ignore for now. */
6503 else if (mode == V1TImode)
6504 return false;
6506 /* Start with a vspltisw. */
6507 step = GET_MODE_NUNITS (mode) / 4;
6508 copies = 1;
6510 if (vspltis_constant (op, step, copies))
6511 return true;
6513 /* Then try with a vspltish. */
6514 if (step == 1)
6515 copies <<= 1;
6516 else
6517 step >>= 1;
6519 if (vspltis_constant (op, step, copies))
6520 return true;
6522 /* And finally a vspltisb. */
6523 if (step == 1)
6524 copies <<= 1;
6525 else
6526 step >>= 1;
6528 if (vspltis_constant (op, step, copies))
6529 return true;
6531 if (vspltis_shifted (op) != 0)
6532 return true;
6534 return false;
6537 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6538 result is OP. Abort if it is not possible. */
6541 gen_easy_altivec_constant (rtx op)
6543 machine_mode mode = GET_MODE (op);
6544 int nunits = GET_MODE_NUNITS (mode);
6545 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6546 unsigned step = nunits / 4;
6547 unsigned copies = 1;
6549 /* Start with a vspltisw. */
6550 if (vspltis_constant (op, step, copies))
6551 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6553 /* Then try with a vspltish. */
6554 if (step == 1)
6555 copies <<= 1;
6556 else
6557 step >>= 1;
6559 if (vspltis_constant (op, step, copies))
6560 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6562 /* And finally a vspltisb. */
6563 if (step == 1)
6564 copies <<= 1;
6565 else
6566 step >>= 1;
6568 if (vspltis_constant (op, step, copies))
6569 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6571 gcc_unreachable ();
6574 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6575 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6577 Return the number of instructions needed (1 or 2) via the address pointed
6578 to by NUM_INSNS_PTR.
6580 Return the constant that is being split via CONSTANT_PTR. */
6582 bool
6583 xxspltib_constant_p (rtx op,
6584 machine_mode mode,
6585 int *num_insns_ptr,
6586 int *constant_ptr)
6588 size_t nunits = GET_MODE_NUNITS (mode);
6589 size_t i;
6590 HOST_WIDE_INT value;
6591 rtx element;
6593 /* Set the returned values to out of bound values. */
6594 *num_insns_ptr = -1;
6595 *constant_ptr = 256;
6597 if (!TARGET_P9_VECTOR)
6598 return false;
6600 if (mode == VOIDmode)
6601 mode = GET_MODE (op);
6603 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6604 return false;
6606 /* Handle (vec_duplicate <constant>). */
6607 if (GET_CODE (op) == VEC_DUPLICATE)
6609 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6610 && mode != V2DImode)
6611 return false;
6613 element = XEXP (op, 0);
6614 if (!CONST_INT_P (element))
6615 return false;
6617 value = INTVAL (element);
6618 if (!IN_RANGE (value, -128, 127))
6619 return false;
6622 /* Handle (const_vector [...]). */
6623 else if (GET_CODE (op) == CONST_VECTOR)
6625 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6626 && mode != V2DImode)
6627 return false;
6629 element = CONST_VECTOR_ELT (op, 0);
6630 if (!CONST_INT_P (element))
6631 return false;
6633 value = INTVAL (element);
6634 if (!IN_RANGE (value, -128, 127))
6635 return false;
6637 for (i = 1; i < nunits; i++)
6639 element = CONST_VECTOR_ELT (op, i);
6640 if (!CONST_INT_P (element))
6641 return false;
6643 if (value != INTVAL (element))
6644 return false;
6648 /* Handle integer constants being loaded into the upper part of the VSX
6649 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6650 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6651 else if (CONST_INT_P (op))
6653 if (!SCALAR_INT_MODE_P (mode))
6654 return false;
6656 value = INTVAL (op);
6657 if (!IN_RANGE (value, -128, 127))
6658 return false;
6660 if (!IN_RANGE (value, -1, 0))
6662 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6663 return false;
6665 if (EASY_VECTOR_15 (value))
6666 return false;
6670 else
6671 return false;
6673 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6674 sign extend. Special case 0/-1 to allow getting any VSX register instead
6675 of an Altivec register. */
6676 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6677 && EASY_VECTOR_15 (value))
6678 return false;
6680 /* Return # of instructions and the constant byte for XXSPLTIB. */
6681 if (mode == V16QImode)
6682 *num_insns_ptr = 1;
6684 else if (IN_RANGE (value, -1, 0))
6685 *num_insns_ptr = 1;
6687 else
6688 *num_insns_ptr = 2;
6690 *constant_ptr = (int) value;
6691 return true;
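/* Examples for the routine above: (vec_duplicate (const_int 20)) in
   V16QImode is a single xxspltib 20 (*NUM_INSNS_PTR == 1); the same splat
   in V4SImode needs xxspltib plus a vextsb2w sign extension
   (*NUM_INSNS_PTR == 2); and a V4SImode splat of 5 returns false because
   vspltisw 5 is preferred.  */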
6694 const char *
6695 output_vec_const_move (rtx *operands)
6697 int shift;
6698 machine_mode mode;
6699 rtx dest, vec;
6701 dest = operands[0];
6702 vec = operands[1];
6703 mode = GET_MODE (dest);
6705 if (TARGET_VSX)
6707 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6708 int xxspltib_value = 256;
6709 int num_insns = -1;
6711 if (zero_constant (vec, mode))
6713 if (TARGET_P9_VECTOR)
6714 return "xxspltib %x0,0";
6716 else if (dest_vmx_p)
6717 return "vspltisw %0,0";
6719 else
6720 return "xxlxor %x0,%x0,%x0";
6723 if (all_ones_constant (vec, mode))
6725 if (TARGET_P9_VECTOR)
6726 return "xxspltib %x0,255";
6728 else if (dest_vmx_p)
6729 return "vspltisw %0,-1";
6731 else if (TARGET_P8_VECTOR)
6732 return "xxlorc %x0,%x0,%x0";
6734 else
6735 gcc_unreachable ();
6738 if (TARGET_P9_VECTOR
6739 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6741 if (num_insns == 1)
6743 operands[2] = GEN_INT (xxspltib_value & 0xff);
6744 return "xxspltib %x0,%2";
6747 return "#";
6751 if (TARGET_ALTIVEC)
6753 rtx splat_vec;
6755 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6756 if (zero_constant (vec, mode))
6757 return "vspltisw %0,0";
6759 if (all_ones_constant (vec, mode))
6760 return "vspltisw %0,-1";
6762 /* Do we need to construct a value using VSLDOI? */
6763 shift = vspltis_shifted (vec);
6764 if (shift != 0)
6765 return "#";
6767 splat_vec = gen_easy_altivec_constant (vec);
6768 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6769 operands[1] = XEXP (splat_vec, 0);
6770 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6771 return "#";
6773 switch (GET_MODE (splat_vec))
6775 case E_V4SImode:
6776 return "vspltisw %0,%1";
6778 case E_V8HImode:
6779 return "vspltish %0,%1";
6781 case E_V16QImode:
6782 return "vspltisb %0,%1";
6784 default:
6785 gcc_unreachable ();
6789 gcc_unreachable ();
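/* Examples of the templates above: an all-zeros vector constant becomes
   "xxspltib %x0,0" on ISA 3.0, "vspltisw %0,0" when the destination is an
   AltiVec register, or "xxlxor %x0,%x0,%x0" otherwise; an AltiVec-only
   V8HImode splat of -5 prints as "vspltish %0,-5"; and constants needing
   VSLDOI or a two-insn XXSPLTIB sequence return "#" to force a split.  */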
6792 /* Initialize the PAIRED vector TARGET to VALS. */
6794 void
6795 paired_expand_vector_init (rtx target, rtx vals)
6797 machine_mode mode = GET_MODE (target);
6798 int n_elts = GET_MODE_NUNITS (mode);
6799 int n_var = 0;
6800 rtx x, new_rtx, tmp, constant_op, op1, op2;
6801 int i;
6803 for (i = 0; i < n_elts; ++i)
6805 x = XVECEXP (vals, 0, i);
6806 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6807 ++n_var;
6809 if (n_var == 0)
6811 /* Load from constant pool. */
6812 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6813 return;
6816 if (n_var == 2)
6818 /* The vector is initialized only with non-constants. */
6819 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6820 XVECEXP (vals, 0, 1));
6822 emit_move_insn (target, new_rtx);
6823 return;
6826 /* One field is non-constant and the other one is a constant. Load the
6827 constant from the constant pool and use the ps_merge instruction to
6828 construct the whole vector. */
6829 op1 = XVECEXP (vals, 0, 0);
6830 op2 = XVECEXP (vals, 0, 1);
6832 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6834 tmp = gen_reg_rtx (GET_MODE (constant_op));
6835 emit_move_insn (tmp, constant_op);
6837 if (CONSTANT_P (op1))
6838 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6839 else
6840 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6842 emit_move_insn (target, new_rtx);
6845 void
6846 paired_expand_vector_move (rtx operands[])
6848 rtx op0 = operands[0], op1 = operands[1];
6850 emit_move_insn (op0, op1);
6853 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6854 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6855 operands for the relation operation COND. This is a recursive
6856 function. */
6858 static void
6859 paired_emit_vector_compare (enum rtx_code rcode,
6860 rtx dest, rtx op0, rtx op1,
6861 rtx cc_op0, rtx cc_op1)
6863 rtx tmp = gen_reg_rtx (V2SFmode);
6864 rtx tmp1, max, min;
6866 gcc_assert (TARGET_PAIRED_FLOAT);
6867 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6869 switch (rcode)
6871 case LT:
6872 case LTU:
6873 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6874 return;
6875 case GE:
6876 case GEU:
6877 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6878 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6879 return;
6880 case LE:
6881 case LEU:
6882 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6883 return;
6884 case GT:
6885 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6886 return;
6887 case EQ:
6888 tmp1 = gen_reg_rtx (V2SFmode);
6889 max = gen_reg_rtx (V2SFmode);
6890 min = gen_reg_rtx (V2SFmode);
6891 gen_reg_rtx (V2SFmode);
6893 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6894 emit_insn (gen_selv2sf4
6895 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6896 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6897 emit_insn (gen_selv2sf4
6898 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6899 emit_insn (gen_subv2sf3 (tmp1, min, max));
6900 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6901 return;
6902 case NE:
6903 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6904 return;
6905 case UNLE:
6906 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6907 return;
6908 case UNLT:
6909 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6910 return;
6911 case UNGE:
6912 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6913 return;
6914 case UNGT:
6915 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6916 return;
6917 default:
6918 gcc_unreachable ();
6921 return;
6924 /* Emit vector conditional expression.
6925 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6926 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6929 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6930 rtx cond, rtx cc_op0, rtx cc_op1)
6932 enum rtx_code rcode = GET_CODE (cond);
6934 if (!TARGET_PAIRED_FLOAT)
6935 return 0;
6937 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6939 return 1;
6942 /* Initialize vector TARGET to VALS. */
6944 void
6945 rs6000_expand_vector_init (rtx target, rtx vals)
6947 machine_mode mode = GET_MODE (target);
6948 machine_mode inner_mode = GET_MODE_INNER (mode);
6949 int n_elts = GET_MODE_NUNITS (mode);
6950 int n_var = 0, one_var = -1;
6951 bool all_same = true, all_const_zero = true;
6952 rtx x, mem;
6953 int i;
6955 for (i = 0; i < n_elts; ++i)
6957 x = XVECEXP (vals, 0, i);
6958 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6959 ++n_var, one_var = i;
6960 else if (x != CONST0_RTX (inner_mode))
6961 all_const_zero = false;
6963 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6964 all_same = false;
6967 if (n_var == 0)
6969 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6970 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6971 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6973 /* Zero register. */
6974 emit_move_insn (target, CONST0_RTX (mode));
6975 return;
6977 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6979 /* Splat immediate. */
6980 emit_insn (gen_rtx_SET (target, const_vec));
6981 return;
6983 else
6985 /* Load from constant pool. */
6986 emit_move_insn (target, const_vec);
6987 return;
6991 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6992 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6994 rtx op[2];
6995 size_t i;
6996 size_t num_elements = all_same ? 1 : 2;
6997 for (i = 0; i < num_elements; i++)
6999 op[i] = XVECEXP (vals, 0, i);
7000 /* Just in case there is a SUBREG with a smaller mode, do a
7001 conversion. */
7002 if (GET_MODE (op[i]) != inner_mode)
7004 rtx tmp = gen_reg_rtx (inner_mode);
7005 convert_move (tmp, op[i], 0);
7006 op[i] = tmp;
7008 /* Allow load with splat double word. */
7009 else if (MEM_P (op[i]))
7011 if (!all_same)
7012 op[i] = force_reg (inner_mode, op[i]);
7014 else if (!REG_P (op[i]))
7015 op[i] = force_reg (inner_mode, op[i]);
7018 if (all_same)
7020 if (mode == V2DFmode)
7021 emit_insn (gen_vsx_splat_v2df (target, op[0]));
7022 else
7023 emit_insn (gen_vsx_splat_v2di (target, op[0]));
7025 else
7027 if (mode == V2DFmode)
7028 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
7029 else
7030 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
7032 return;
7035 /* Special case initializing vector int if we are on 64-bit systems with
7036 direct move or we have the ISA 3.0 instructions. */
7037 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
7038 && TARGET_DIRECT_MOVE_64BIT)
7040 if (all_same)
7042 rtx element0 = XVECEXP (vals, 0, 0);
7043 if (MEM_P (element0))
7044 element0 = rs6000_address_for_fpconvert (element0);
7045 else
7046 element0 = force_reg (SImode, element0);
7048 if (TARGET_P9_VECTOR)
7049 emit_insn (gen_vsx_splat_v4si (target, element0));
7050 else
7052 rtx tmp = gen_reg_rtx (DImode);
7053 emit_insn (gen_zero_extendsidi2 (tmp, element0));
7054 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
7056 return;
7058 else
7060 rtx elements[4];
7061 size_t i;
7063 for (i = 0; i < 4; i++)
7065 elements[i] = XVECEXP (vals, 0, i);
7066 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
7067 elements[i] = copy_to_mode_reg (SImode, elements[i]);
7070 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
7071 elements[2], elements[3]));
7072 return;
7076 /* With single-precision floating point on VSX, we know that internally single
7077 precision is actually represented as a double, and either make 2 V2DF
7078 vectors, and convert these vectors to single precision, or do one
7079 conversion, and splat the result to the other elements. */
7080 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
7082 if (all_same)
7084 rtx element0 = XVECEXP (vals, 0, 0);
7086 if (TARGET_P9_VECTOR)
7088 if (MEM_P (element0))
7089 element0 = rs6000_address_for_fpconvert (element0);
7091 emit_insn (gen_vsx_splat_v4sf (target, element0));
7094 else
7096 rtx freg = gen_reg_rtx (V4SFmode);
7097 rtx sreg = force_reg (SFmode, element0);
7098 rtx cvt = (TARGET_XSCVDPSPN
7099 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
7100 : gen_vsx_xscvdpsp_scalar (freg, sreg));
7102 emit_insn (cvt);
7103 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
7104 const0_rtx));
7107 else
7109 rtx dbl_even = gen_reg_rtx (V2DFmode);
7110 rtx dbl_odd = gen_reg_rtx (V2DFmode);
7111 rtx flt_even = gen_reg_rtx (V4SFmode);
7112 rtx flt_odd = gen_reg_rtx (V4SFmode);
7113 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
7114 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
7115 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
7116 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
7118 /* Use VMRGEW if we can instead of doing a permute. */
7119 if (TARGET_P8_VECTOR)
7121 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
7122 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
7123 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7124 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7125 if (BYTES_BIG_ENDIAN)
7126 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
7127 else
7128 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
7130 else
7132 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
7133 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
7134 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7135 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7136 rs6000_expand_extract_even (target, flt_even, flt_odd);
7139 return;
7142 /* Special case initializing vector short/char that are splats if we are on
7143 64-bit systems with direct move. */
7144 if (all_same && TARGET_DIRECT_MOVE_64BIT
7145 && (mode == V16QImode || mode == V8HImode))
7147 rtx op0 = XVECEXP (vals, 0, 0);
7148 rtx di_tmp = gen_reg_rtx (DImode);
7150 if (!REG_P (op0))
7151 op0 = force_reg (GET_MODE_INNER (mode), op0);
7153 if (mode == V16QImode)
7155 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
7156 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
7157 return;
7160 if (mode == V8HImode)
7162 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
7163 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
7164 return;
7168 /* Store value to stack temp. Load vector element. Splat. However, splat
7169 of 64-bit items is not supported on Altivec. */
7170 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
7172 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7173 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
7174 XVECEXP (vals, 0, 0));
7175 x = gen_rtx_UNSPEC (VOIDmode,
7176 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7177 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7178 gen_rtvec (2,
7179 gen_rtx_SET (target, mem),
7180 x)));
7181 x = gen_rtx_VEC_SELECT (inner_mode, target,
7182 gen_rtx_PARALLEL (VOIDmode,
7183 gen_rtvec (1, const0_rtx)));
7184 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
7185 return;
7188 /* One field is non-constant. Load constant then overwrite
7189 varying field. */
7190 if (n_var == 1)
7192 rtx copy = copy_rtx (vals);
7194 /* Load constant part of vector, substitute neighboring value for
7195 varying element. */
7196 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
7197 rs6000_expand_vector_init (target, copy);
7199 /* Insert variable. */
7200 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
7201 return;
7204 /* Construct the vector in memory one field at a time
7205 and load the whole vector. */
7206 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7207 for (i = 0; i < n_elts; i++)
7208 emit_move_insn (adjust_address_nv (mem, inner_mode,
7209 i * GET_MODE_SIZE (inner_mode)),
7210 XVECEXP (vals, 0, i));
7211 emit_move_insn (target, mem);
7214 /* Set field ELT of TARGET to VAL. */
7216 void
7217 rs6000_expand_vector_set (rtx target, rtx val, int elt)
7219 machine_mode mode = GET_MODE (target);
7220 machine_mode inner_mode = GET_MODE_INNER (mode);
7221 rtx reg = gen_reg_rtx (mode);
7222 rtx mask, mem, x;
7223 int width = GET_MODE_SIZE (inner_mode);
7224 int i;
7226 val = force_reg (GET_MODE (val), val);
7228 if (VECTOR_MEM_VSX_P (mode))
7230 rtx insn = NULL_RTX;
7231 rtx elt_rtx = GEN_INT (elt);
7233 if (mode == V2DFmode)
7234 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7236 else if (mode == V2DImode)
7237 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7239 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
7241 if (mode == V4SImode)
7242 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7243 else if (mode == V8HImode)
7244 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7245 else if (mode == V16QImode)
7246 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7247 else if (mode == V4SFmode)
7248 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
7251 if (insn)
7253 emit_insn (insn);
7254 return;
7258 /* Simplify setting single element vectors like V1TImode. */
7259 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7261 emit_move_insn (target, gen_lowpart (mode, val));
7262 return;
7265 /* Load single variable value. */
7266 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7267 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7268 x = gen_rtx_UNSPEC (VOIDmode,
7269 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7270 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7271 gen_rtvec (2,
7272 gen_rtx_SET (reg, mem),
7273 x)));
7275 /* Linear sequence. */
7276 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7277 for (i = 0; i < 16; ++i)
7278 XVECEXP (mask, 0, i) = GEN_INT (i);
7280 /* Set permute mask to insert element into target. */
7281 for (i = 0; i < width; ++i)
7282 XVECEXP (mask, 0, elt*width + i)
7283 = GEN_INT (i + 0x10);
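/* Illustrative example (not from the original source): for a V4SImode
   TARGET with ELT == 1, WIDTH is 4, so mask bytes 4..7 become
   0x10..0x13; in the big-endian VPERM below, selector values of
   0x10 and up pick bytes from the second input (REG, holding the new
   value), so those four bytes replace element 1 of the old vector.  */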
7284 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
7286 if (BYTES_BIG_ENDIAN)
7287 x = gen_rtx_UNSPEC (mode,
7288 gen_rtvec (3, target, reg,
7289 force_reg (V16QImode, x)),
7290 UNSPEC_VPERM);
7291 else
7293 if (TARGET_P9_VECTOR)
7294 x = gen_rtx_UNSPEC (mode,
7295 gen_rtvec (3, target, reg,
7296 force_reg (V16QImode, x)),
7297 UNSPEC_VPERMR);
7298 else
7300 /* Invert selector. We prefer to generate VNAND on P8 so
7301 that future fusion opportunities can kick in, but must
7302 generate VNOR elsewhere. */
7303 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7304 rtx iorx = (TARGET_P8_VECTOR
7305 ? gen_rtx_IOR (V16QImode, notx, notx)
7306 : gen_rtx_AND (V16QImode, notx, notx));
7307 rtx tmp = gen_reg_rtx (V16QImode);
7308 emit_insn (gen_rtx_SET (tmp, iorx));
7310 /* Permute with operands reversed and adjusted selector. */
7311 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7312 UNSPEC_VPERM);
7316 emit_insn (gen_rtx_SET (target, x));
7319 /* Extract field ELT from VEC into TARGET. */
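/* A brief sketch of the strategy below: constant element numbers on VSX
   map to single vsx_extract_* patterns, variable element numbers on
   64-bit direct-move targets use the vsx_extract_*_var patterns, and
   everything else falls back to storing the vector to a stack temporary
   and loading the selected scalar back.  */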
7321 void
7322 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7324 machine_mode mode = GET_MODE (vec);
7325 machine_mode inner_mode = GET_MODE_INNER (mode);
7326 rtx mem;
7328 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7330 switch (mode)
7332 default:
7333 break;
7334 case E_V1TImode:
7335 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7336 emit_move_insn (target, gen_lowpart (TImode, vec));
7337 break;
7338 case E_V2DFmode:
7339 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7340 return;
7341 case E_V2DImode:
7342 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7343 return;
7344 case E_V4SFmode:
7345 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7346 return;
7347 case E_V16QImode:
7348 if (TARGET_DIRECT_MOVE_64BIT)
7350 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7351 return;
7353 else
7354 break;
7355 case E_V8HImode:
7356 if (TARGET_DIRECT_MOVE_64BIT)
7358 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7359 return;
7361 else
7362 break;
7363 case E_V4SImode:
7364 if (TARGET_DIRECT_MOVE_64BIT)
7366 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7367 return;
7369 break;
7372 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7373 && TARGET_DIRECT_MOVE_64BIT)
7375 if (GET_MODE (elt) != DImode)
7377 rtx tmp = gen_reg_rtx (DImode);
7378 convert_move (tmp, elt, 0);
7379 elt = tmp;
7381 else if (!REG_P (elt))
7382 elt = force_reg (DImode, elt);
7384 switch (mode)
7386 case E_V2DFmode:
7387 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7388 return;
7390 case E_V2DImode:
7391 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7392 return;
7394 case E_V4SFmode:
7395 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7396 return;
7398 case E_V4SImode:
7399 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7400 return;
7402 case E_V8HImode:
7403 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7404 return;
7406 case E_V16QImode:
7407 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7408 return;
7410 default:
7411 gcc_unreachable ();
7415 gcc_assert (CONST_INT_P (elt));
7417 /* Allocate mode-sized buffer. */
7418 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7420 emit_move_insn (mem, vec);
7422 /* Add offset to field within buffer matching vector element. */
7423 mem = adjust_address_nv (mem, inner_mode,
7424 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7426 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
7429 /* Helper function to return the register number of an RTX. */
7430 static inline int
7431 regno_or_subregno (rtx op)
7433 if (REG_P (op))
7434 return REGNO (op);
7435 else if (SUBREG_P (op))
7436 return subreg_regno (op);
7437 else
7438 gcc_unreachable ();
7441 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7442 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7443 temporary (BASE_TMP) to fix up the address. Return the new memory address
7444 that is valid for reads or writes to a given register (SCALAR_REG). */
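/* Worked example (illustrative): extracting constant element 3 of a
   V4SImode vector addressed as (plus (reg R) (const_int 16)) folds the
   element offset 3 * 4 = 12 into the displacement, yielding the SImode
   address (plus (reg R) (const_int 28)).  With a variable element number
   in a register, the byte offset is instead computed at run time by
   shifting the element number left by log2 (element size) into
   BASE_TMP.  */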
7446 static rtx
7447 rs6000_adjust_vec_address (rtx scalar_reg,
7448 rtx mem,
7449 rtx element,
7450 rtx base_tmp,
7451 machine_mode scalar_mode)
7453 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7454 rtx addr = XEXP (mem, 0);
7455 rtx element_offset;
7456 rtx new_addr;
7457 bool valid_addr_p;
7459 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7460 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7462 /* Calculate what we need to add to the address to get the element
7463 address. */
7464 if (CONST_INT_P (element))
7465 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7466 else
7468 int byte_shift = exact_log2 (scalar_size);
7469 gcc_assert (byte_shift >= 0);
7471 if (byte_shift == 0)
7472 element_offset = element;
7474 else
7476 if (TARGET_POWERPC64)
7477 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7478 else
7479 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7481 element_offset = base_tmp;
7485 /* Create the new address pointing to the element within the vector. If we
7486 are adding 0, we don't have to change the address. */
7487 if (element_offset == const0_rtx)
7488 new_addr = addr;
7490 /* A simple indirect address can be converted into a reg + offset
7491 address. */
7492 else if (REG_P (addr) || SUBREG_P (addr))
7493 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7495 /* Optimize D-FORM addresses that have a constant offset and a constant
7496 element number, folding the element offset into the address directly. */
7497 else if (GET_CODE (addr) == PLUS)
7499 rtx op0 = XEXP (addr, 0);
7500 rtx op1 = XEXP (addr, 1);
7501 rtx insn;
7503 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7504 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7506 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7507 rtx offset_rtx = GEN_INT (offset);
7509 if (IN_RANGE (offset, -32768, 32767)
7510 && (scalar_size < 8 || (offset & 0x3) == 0))
7511 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7512 else
7514 emit_move_insn (base_tmp, offset_rtx);
7515 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7518 else
7520 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7521 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7523 /* Note, ADDI requires the register being added to be a base
7524 register. If the register was R0, load it up into the temporary
7525 and do the add. */
7526 if (op1_reg_p
7527 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7529 insn = gen_add3_insn (base_tmp, op1, element_offset);
7530 gcc_assert (insn != NULL_RTX);
7531 emit_insn (insn);
7534 else if (ele_reg_p
7535 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7537 insn = gen_add3_insn (base_tmp, element_offset, op1);
7538 gcc_assert (insn != NULL_RTX);
7539 emit_insn (insn);
7542 else
7544 emit_move_insn (base_tmp, op1);
7545 emit_insn (gen_add2_insn (base_tmp, element_offset));
7548 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7552 else
7554 emit_move_insn (base_tmp, addr);
7555 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7558 /* If we have a PLUS, we need to see whether the particular register class
7559 allows for D-FORM or X-FORM addressing. */
7560 if (GET_CODE (new_addr) == PLUS)
7562 rtx op1 = XEXP (new_addr, 1);
7563 addr_mask_type addr_mask;
7564 int scalar_regno = regno_or_subregno (scalar_reg);
7566 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7567 if (INT_REGNO_P (scalar_regno))
7568 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7570 else if (FP_REGNO_P (scalar_regno))
7571 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7573 else if (ALTIVEC_REGNO_P (scalar_regno))
7574 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7576 else
7577 gcc_unreachable ();
7579 if (REG_P (op1) || SUBREG_P (op1))
7580 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7581 else
7582 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7585 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7586 valid_addr_p = true;
7588 else
7589 valid_addr_p = false;
7591 if (!valid_addr_p)
7593 emit_move_insn (base_tmp, new_addr);
7594 new_addr = base_tmp;
7597 return change_address (mem, scalar_mode, new_addr);
7600 /* Split a variable vec_extract operation into the component instructions. */
7602 void
7603 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7604 rtx tmp_altivec)
7606 machine_mode mode = GET_MODE (src);
7607 machine_mode scalar_mode = GET_MODE (dest);
7608 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7609 int byte_shift = exact_log2 (scalar_size);
7611 gcc_assert (byte_shift >= 0);
7613 /* If we are given a memory address, optimize to load just the element. We
7614 don't have to adjust the vector element number on little endian
7615 systems. */
7616 if (MEM_P (src))
7618 gcc_assert (REG_P (tmp_gpr));
7619 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7620 tmp_gpr, scalar_mode));
7621 return;
7624 else if (REG_P (src) || SUBREG_P (src))
7626 int bit_shift = byte_shift + 3;
7627 rtx element2;
7628 int dest_regno = regno_or_subregno (dest);
7629 int src_regno = regno_or_subregno (src);
7630 int element_regno = regno_or_subregno (element);
7632 gcc_assert (REG_P (tmp_gpr));
7634 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7635 a general purpose register. */
7636 if (TARGET_P9_VECTOR
7637 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7638 && INT_REGNO_P (dest_regno)
7639 && ALTIVEC_REGNO_P (src_regno)
7640 && INT_REGNO_P (element_regno))
7642 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7643 rtx element_si = gen_rtx_REG (SImode, element_regno);
7645 if (mode == V16QImode)
7646 emit_insn (VECTOR_ELT_ORDER_BIG
7647 ? gen_vextublx (dest_si, element_si, src)
7648 : gen_vextubrx (dest_si, element_si, src));
7650 else if (mode == V8HImode)
7652 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7653 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7654 emit_insn (VECTOR_ELT_ORDER_BIG
7655 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7656 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7660 else
7662 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7663 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7664 emit_insn (VECTOR_ELT_ORDER_BIG
7665 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7666 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7669 return;
7673 gcc_assert (REG_P (tmp_altivec));
7675 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7676 an XOR, otherwise we need to subtract. The shift amount is chosen so that
7677 VSLO will shift the element into the upper position (adding 3 to convert a
7678 byte shift into a bit shift). */
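/* Worked example (illustrative): for a V4SImode extract on little
   endian, element 1 maps to 3 - 1 = 2 via the AND/subtract below, and
   the shift amount becomes 2 << 5 == 64 bits (8 bytes), which VSLO uses
   to move that element into the upper position.  */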
7679 if (scalar_size == 8)
7681 if (!VECTOR_ELT_ORDER_BIG)
7683 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7684 element2 = tmp_gpr;
7686 else
7687 element2 = element;
7689 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7690 bit. */
7691 emit_insn (gen_rtx_SET (tmp_gpr,
7692 gen_rtx_AND (DImode,
7693 gen_rtx_ASHIFT (DImode,
7694 element2,
7695 GEN_INT (6)),
7696 GEN_INT (64))));
7698 else
7700 if (!VECTOR_ELT_ORDER_BIG)
7702 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7704 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7705 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7706 element2 = tmp_gpr;
7708 else
7709 element2 = element;
7711 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7714 /* Get the value into the lower byte of the Altivec register where VSLO
7715 expects it. */
7716 if (TARGET_P9_VECTOR)
7717 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7718 else if (can_create_pseudo_p ())
7719 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7720 else
7722 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7723 emit_move_insn (tmp_di, tmp_gpr);
7724 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7727 /* Do the VSLO to get the value into the final location. */
7728 switch (mode)
7730 case E_V2DFmode:
7731 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7732 return;
7734 case E_V2DImode:
7735 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7736 return;
7738 case E_V4SFmode:
7740 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7741 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7742 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7743 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7744 tmp_altivec));
7746 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7747 return;
7750 case E_V4SImode:
7751 case E_V8HImode:
7752 case E_V16QImode:
7754 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7755 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7756 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7757 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7758 tmp_altivec));
7759 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7760 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7761 GEN_INT (64 - (8 * scalar_size))));
7762 return;
7765 default:
7766 gcc_unreachable ();
7769 return;
7771 else
7772 gcc_unreachable ();
7775 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7776 two SImode values. */
7778 static void
7779 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
7781 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
7783 if (CONST_INT_P (si1) && CONST_INT_P (si2))
7785 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
7786 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
7788 emit_move_insn (dest, GEN_INT (const1 | const2));
7789 return;
7793 /* Put si1 into the upper 32 bits of dest. */
7793 if (CONST_INT_P (si1))
7794 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
7795 else
7797 /* Generate RLDIC. */
7798 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
7799 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
7800 rtx mask_rtx = GEN_INT (mask_32bit << 32);
7801 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
7802 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
7803 emit_insn (gen_rtx_SET (dest, and_rtx));
7806 /* Put si2 into the temporary. */
7807 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
7808 if (CONST_INT_P (si2))
7809 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
7810 else
7811 emit_insn (gen_zero_extendsidi2 (tmp, si2));
7813 /* Combine the two parts. */
7814 emit_insn (gen_iordi3 (dest, dest, tmp));
7815 return;
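/* Worked example (illustrative): for constant inputs SI1 = 0x12345678
   and SI2 = 0x9abcdef0, the function emits a single move of the combined
   DImode constant 0x123456789abcdef0 into DEST.  */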
7818 /* Split a V4SI initialization. */
7820 void
7821 rs6000_split_v4si_init (rtx operands[])
7823 rtx dest = operands[0];
7825 /* Destination is a GPR, build up the two DImode parts in place. */
7826 if (REG_P (dest) || SUBREG_P (dest))
7828 int d_regno = regno_or_subregno (dest);
7829 rtx scalar1 = operands[1];
7830 rtx scalar2 = operands[2];
7831 rtx scalar3 = operands[3];
7832 rtx scalar4 = operands[4];
7833 rtx tmp1 = operands[5];
7834 rtx tmp2 = operands[6];
7836 /* Even though we only need one temporary (plus the destination, which
7837 has an early clobber constraint), try to use two temporaries, one for
7838 each double word created. That way the 2nd insn scheduling pass can
7839 rearrange things so the two parts are done in parallel. */
7840 if (BYTES_BIG_ENDIAN)
7842 rtx di_lo = gen_rtx_REG (DImode, d_regno);
7843 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
7844 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
7845 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
7847 else
7849 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
7850 rtx di_hi = gen_rtx_REG (DImode, d_regno);
7851 gcc_assert (!VECTOR_ELT_ORDER_BIG);
7852 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
7853 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
7855 return;
7858 else
7859 gcc_unreachable ();
7862 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7863 selects whether the alignment is ABI-mandated, optional, or
7864 both ABI-mandated and optional alignment. */
7866 unsigned int
7867 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7869 if (how != align_opt)
7871 if (TREE_CODE (type) == VECTOR_TYPE)
7873 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type)))
7875 if (align < 64)
7876 align = 64;
7878 else if (align < 128)
7879 align = 128;
7883 if (how != align_abi)
7885 if (TREE_CODE (type) == ARRAY_TYPE
7886 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7888 if (align < BITS_PER_WORD)
7889 align = BITS_PER_WORD;
7893 return align;
7896 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7897 instructions simply ignore the low bits; VSX memory instructions
7898 are aligned to 4 or 8 bytes. */
7900 static bool
7901 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7903 return (STRICT_ALIGNMENT
7904 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7905 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7906 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7907 && (int) align < VECTOR_ALIGN (mode)))));
7910 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7912 bool
7913 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7915 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7917 if (computed != 128)
7919 static bool warned;
7920 if (!warned && warn_psabi)
7922 warned = true;
7923 inform (input_location,
7924 "the layout of aggregates containing vectors with"
7925 " %d-byte alignment has changed in GCC 5",
7926 computed / BITS_PER_UNIT);
7929 /* In current GCC there is no special case. */
7930 return false;
7933 return false;
7936 /* AIX increases natural record alignment to doubleword if the first
7937 field is an FP double while the FP fields remain word aligned. */
7939 unsigned int
7940 rs6000_special_round_type_align (tree type, unsigned int computed,
7941 unsigned int specified)
7943 unsigned int align = MAX (computed, specified);
7944 tree field = TYPE_FIELDS (type);
7946 /* Skip all non-field decls. */
7947 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7948 field = DECL_CHAIN (field);
7950 if (field != NULL && field != type)
7952 type = TREE_TYPE (field);
7953 while (TREE_CODE (type) == ARRAY_TYPE)
7954 type = TREE_TYPE (type);
7956 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7957 align = MAX (align, 64);
7960 return align;
7963 /* Darwin increases record alignment to the natural alignment of
7964 the first field. */
7966 unsigned int
7967 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7968 unsigned int specified)
7970 unsigned int align = MAX (computed, specified);
7972 if (TYPE_PACKED (type))
7973 return align;
7975 /* Find the first field, looking down into aggregates. */
7976 do {
7977 tree field = TYPE_FIELDS (type);
7978 /* Skip all non-field decls. */
7979 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7980 field = DECL_CHAIN (field);
7981 if (! field)
7982 break;
7983 /* A packed field does not contribute any extra alignment. */
7984 if (DECL_PACKED (field))
7985 return align;
7986 type = TREE_TYPE (field);
7987 while (TREE_CODE (type) == ARRAY_TYPE)
7988 type = TREE_TYPE (type);
7989 } while (AGGREGATE_TYPE_P (type));
7991 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7992 align = MAX (align, TYPE_ALIGN (type));
7994 return align;
7997 /* Return 1 for an operand in small memory on V.4/eabi. */
7999 int
8000 small_data_operand (rtx op ATTRIBUTE_UNUSED,
8001 machine_mode mode ATTRIBUTE_UNUSED)
8003 #if TARGET_ELF
8004 rtx sym_ref;
8006 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
8007 return 0;
8009 if (DEFAULT_ABI != ABI_V4)
8010 return 0;
8012 if (GET_CODE (op) == SYMBOL_REF)
8013 sym_ref = op;
8015 else if (GET_CODE (op) != CONST
8016 || GET_CODE (XEXP (op, 0)) != PLUS
8017 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
8018 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
8019 return 0;
8021 else
8023 rtx sum = XEXP (op, 0);
8024 HOST_WIDE_INT summand;
8026 /* We have to be careful here, because it is the referenced address
8027 that must be 32k from _SDA_BASE_, not just the symbol. */
8028 summand = INTVAL (XEXP (sum, 1));
8029 if (summand < 0 || summand > g_switch_value)
8030 return 0;
8032 sym_ref = XEXP (sum, 0);
8035 return SYMBOL_REF_SMALL_P (sym_ref);
8036 #else
8037 return 0;
8038 #endif
8041 /* Return true if either operand is a general purpose register. */
8043 bool
8044 gpr_or_gpr_p (rtx op0, rtx op1)
8046 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
8047 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
8050 /* Return true if this is a move direct operation between GPR registers and
8051 floating point/VSX registers. */
8053 bool
8054 direct_move_p (rtx op0, rtx op1)
8056 int regno0, regno1;
8058 if (!REG_P (op0) || !REG_P (op1))
8059 return false;
8061 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
8062 return false;
8064 regno0 = REGNO (op0);
8065 regno1 = REGNO (op1);
8066 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
8067 return false;
8069 if (INT_REGNO_P (regno0))
8070 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
8072 else if (INT_REGNO_P (regno1))
8074 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
8075 return true;
8077 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
8078 return true;
8081 return false;
8084 /* Return true if the OFFSET is valid for the quad address instructions that
8085 use d-form (register + offset) addressing. */
8087 static inline bool
8088 quad_address_offset_p (HOST_WIDE_INT offset)
8090 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
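/* For example, offsets 0, 16 and -32768 satisfy quad_address_offset_p,
   while 8 fails the low-nibble (multiple of 16) test and 32768 falls
   outside the signed 16-bit range.  */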
8093 /* Return true if the ADDR is an acceptable address for a quad memory
8094 operation of mode MODE (either LQ/STQ for general purpose registers, or
8095 LXV/STXV for vector registers under ISA 3.0). STRICT is true if strict
8096 register checking is required, i.e. only hard registers are valid as
8097 base registers. */
8099 bool
8100 quad_address_p (rtx addr, machine_mode mode, bool strict)
8102 rtx op0, op1;
8104 if (GET_MODE_SIZE (mode) != 16)
8105 return false;
8107 if (legitimate_indirect_address_p (addr, strict))
8108 return true;
8110 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8111 return false;
8113 if (GET_CODE (addr) != PLUS)
8114 return false;
8116 op0 = XEXP (addr, 0);
8117 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8118 return false;
8120 op1 = XEXP (addr, 1);
8121 if (!CONST_INT_P (op1))
8122 return false;
8124 return quad_address_offset_p (INTVAL (op1));
8127 /* Return true if this is a load or store quad operation. This function does
8128 not handle the atomic quad memory instructions. */
8130 bool
8131 quad_load_store_p (rtx op0, rtx op1)
8133 bool ret;
8135 if (!TARGET_QUAD_MEMORY)
8136 ret = false;
8138 else if (REG_P (op0) && MEM_P (op1))
8139 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
8140 && quad_memory_operand (op1, GET_MODE (op1))
8141 && !reg_overlap_mentioned_p (op0, op1));
8143 else if (MEM_P (op0) && REG_P (op1))
8144 ret = (quad_memory_operand (op0, GET_MODE (op0))
8145 && quad_int_reg_operand (op1, GET_MODE (op1)));
8147 else
8148 ret = false;
8150 if (TARGET_DEBUG_ADDR)
8152 fprintf (stderr, "\n========== quad_load_store, return %s\n",
8153 ret ? "true" : "false");
8154 debug_rtx (gen_rtx_SET (op0, op1));
8157 return ret;
8160 /* Given an address, return a constant offset term if one exists. */
8162 static rtx
8163 address_offset (rtx op)
8165 if (GET_CODE (op) == PRE_INC
8166 || GET_CODE (op) == PRE_DEC)
8167 op = XEXP (op, 0);
8168 else if (GET_CODE (op) == PRE_MODIFY
8169 || GET_CODE (op) == LO_SUM)
8170 op = XEXP (op, 1);
8172 if (GET_CODE (op) == CONST)
8173 op = XEXP (op, 0);
8175 if (GET_CODE (op) == PLUS)
8176 op = XEXP (op, 1);
8178 if (CONST_INT_P (op))
8179 return op;
8181 return NULL_RTX;
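/* For example (illustrative), address_offset returns (const_int 8) for
   (plus (reg) (const_int 8)), returns (const_int 4) for
   (lo_sum (reg) (const (plus (symbol_ref) (const_int 4)))), and returns
   NULL_RTX for a plain (reg) address.  */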
8184 /* Return true if the MEM operand is a memory operand suitable for use
8185 with a (full width, possibly multiple) gpr load/store. On
8186 powerpc64 this means the offset must be divisible by 4.
8187 Implements 'Y' constraint.
8189 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8190 a constraint function we know the operand has satisfied a suitable
8191 memory predicate. Also accept some odd rtl generated by reload
8192 (see rs6000_legitimize_reload_address for various forms). It is
8193 important that reload rtl be accepted by appropriate constraints
8194 but not by the operand predicate.
8196 Offsetting a lo_sum should not be allowed, except where we know by
8197 alignment that a 32k boundary is not crossed, but see the ???
8198 comment in rs6000_legitimize_reload_address. Note that by
8199 "offsetting" here we mean a further offset to access parts of the
8200 MEM. It's fine to have a lo_sum where the inner address is offset
8201 from a sym, since the same sym+offset will appear in the high part
8202 of the address calculation. */
8204 bool
8205 mem_operand_gpr (rtx op, machine_mode mode)
8207 unsigned HOST_WIDE_INT offset;
8208 int extra;
8209 rtx addr = XEXP (op, 0);
8211 op = address_offset (addr);
8212 if (op == NULL_RTX)
8213 return true;
8215 offset = INTVAL (op);
8216 if (TARGET_POWERPC64 && (offset & 3) != 0)
8217 return false;
8219 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8220 if (extra < 0)
8221 extra = 0;
8223 if (GET_CODE (addr) == LO_SUM)
8224 /* For lo_sum addresses, we must allow any offset except one that
8225 causes a wrap, so test only the low 16 bits. */
8226 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8228 return offset + 0x8000 < 0x10000u - extra;
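/* Worked example: on powerpc64, a DImode access (extra == 0) accepts any
   multiple-of-4 offset in [-32768, 32767], whereas a TImode access
   loaded as two doublewords (extra == 8) accepts offsets only up to
   32756, so that offset + 8 for the second doubleword still fits in the
   16-bit displacement field.  */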
8231 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8232 enforce an offset divisible by 4 even for 32-bit. */
8234 bool
8235 mem_operand_ds_form (rtx op, machine_mode mode)
8237 unsigned HOST_WIDE_INT offset;
8238 int extra;
8239 rtx addr = XEXP (op, 0);
8241 if (!offsettable_address_p (false, mode, addr))
8242 return false;
8244 op = address_offset (addr);
8245 if (op == NULL_RTX)
8246 return true;
8248 offset = INTVAL (op);
8249 if ((offset & 3) != 0)
8250 return false;
8252 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8253 if (extra < 0)
8254 extra = 0;
8256 if (GET_CODE (addr) == LO_SUM)
8257 /* For lo_sum addresses, we must allow any offset except one that
8258 causes a wrap, so test only the low 16 bits. */
8259 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8261 return offset + 0x8000 < 0x10000u - extra;
8264 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8266 static bool
8267 reg_offset_addressing_ok_p (machine_mode mode)
8269 switch (mode)
8271 case E_V16QImode:
8272 case E_V8HImode:
8273 case E_V4SFmode:
8274 case E_V4SImode:
8275 case E_V2DFmode:
8276 case E_V2DImode:
8277 case E_V1TImode:
8278 case E_TImode:
8279 case E_TFmode:
8280 case E_KFmode:
8281 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8282 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8283 a vector mode, if we want to use the VSX registers to move it around,
8284 we need to restrict ourselves to reg+reg addressing. Similarly for
8285 IEEE 128-bit floating point that is passed in a single vector
8286 register. */
8287 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8288 return mode_supports_vsx_dform_quad (mode);
8289 break;
8291 case E_V2SImode:
8292 case E_V2SFmode:
8293 /* Paired vector modes. Only reg+reg addressing is valid. */
8294 if (TARGET_PAIRED_FLOAT)
8295 return false;
8296 break;
8298 case E_SDmode:
8299 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8300 addressing for the LFIWZX and STFIWX instructions. */
8301 if (TARGET_NO_SDMODE_STACK)
8302 return false;
8303 break;
8305 default:
8306 break;
8309 return true;
8312 static bool
8313 virtual_stack_registers_memory_p (rtx op)
8315 int regnum;
8317 if (GET_CODE (op) == REG)
8318 regnum = REGNO (op);
8320 else if (GET_CODE (op) == PLUS
8321 && GET_CODE (XEXP (op, 0)) == REG
8322 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8323 regnum = REGNO (XEXP (op, 0));
8325 else
8326 return false;
8328 return (regnum >= FIRST_VIRTUAL_REGISTER
8329 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8332 /* Return true if a MODE sized memory accesses to OP plus OFFSET
8333 is known to not straddle a 32k boundary. This function is used
8334 to determine whether -mcmodel=medium code can use TOC pointer
8335 relative addressing for OP. This means the alignment of the TOC
8336 pointer must also be taken into account, and unfortunately that is
8337 only 8 bytes. */
8339 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8340 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8341 #endif
8343 static bool
8344 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8345 machine_mode mode)
8347 tree decl;
8348 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8350 if (GET_CODE (op) != SYMBOL_REF)
8351 return false;
8353 /* ISA 3.0 vector d-form addressing is restricted, don't allow
8354 SYMBOL_REF. */
8355 if (mode_supports_vsx_dform_quad (mode))
8356 return false;
8358 dsize = GET_MODE_SIZE (mode);
8359 decl = SYMBOL_REF_DECL (op);
8360 if (!decl)
8362 if (dsize == 0)
8363 return false;
8365 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8366 replacing memory addresses with an anchor plus offset. We
8367 could find the decl by rummaging around in the block->objects
8368 VEC for the given offset but that seems like too much work. */
8369 dalign = BITS_PER_UNIT;
8370 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8371 && SYMBOL_REF_ANCHOR_P (op)
8372 && SYMBOL_REF_BLOCK (op) != NULL)
8374 struct object_block *block = SYMBOL_REF_BLOCK (op);
8376 dalign = block->alignment;
8377 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8379 else if (CONSTANT_POOL_ADDRESS_P (op))
8381 /* It would be nice to have get_pool_align().. */
8382 machine_mode cmode = get_pool_mode (op);
8384 dalign = GET_MODE_ALIGNMENT (cmode);
8387 else if (DECL_P (decl))
8389 dalign = DECL_ALIGN (decl);
8391 if (dsize == 0)
8393 /* Allow BLKmode when the entire object is known to not
8394 cross a 32k boundary. */
8395 if (!DECL_SIZE_UNIT (decl))
8396 return false;
8398 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8399 return false;
8401 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8402 if (dsize > 32768)
8403 return false;
8405 dalign /= BITS_PER_UNIT;
8406 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8407 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8408 return dalign >= dsize;
8411 else
8412 gcc_unreachable ();
8414 /* Find how many bits of the alignment we know for this access. */
8415 dalign /= BITS_PER_UNIT;
8416 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8417 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8418 mask = dalign - 1;
8419 lsb = offset & -offset;
8420 mask &= lsb - 1;
8421 dalign = mask + 1;
8423 return dalign >= dsize;
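/* Worked example: for a decl with 16-byte alignment (clamped to the
   8-byte TOC pointer alignment) and OFFSET == 4, the lsb/mask arithmetic
   above gives a known access alignment of 4 bytes, so a 4-byte access
   provably cannot straddle a 32k boundary while an 8-byte access
   might.  */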
8426 static bool
8427 constant_pool_expr_p (rtx op)
8429 rtx base, offset;
8431 split_const (op, &base, &offset);
8432 return (GET_CODE (base) == SYMBOL_REF
8433 && CONSTANT_POOL_ADDRESS_P (base)
8434 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8437 /* These are only used to pass through from print_operand/print_operand_address
8438 to rs6000_output_addr_const_extra over the intervening function
8439 output_addr_const which is not target code. */
8440 static const_rtx tocrel_base_oac, tocrel_offset_oac;
8442 /* Return true if OP is a toc pointer relative address (the output
8443 of create_TOC_reference). If STRICT, do not match non-split
8444 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8445 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8446 TOCREL_OFFSET_RET respectively. */
8448 bool
8449 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
8450 const_rtx *tocrel_offset_ret)
8452 if (!TARGET_TOC)
8453 return false;
8455 if (TARGET_CMODEL != CMODEL_SMALL)
8457 /* When strict, ensure we have everything tidy. */
8458 if (strict
8459 && !(GET_CODE (op) == LO_SUM
8460 && REG_P (XEXP (op, 0))
8461 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8462 return false;
8464 /* When not strict, allow non-split TOC addresses and also allow
8465 (lo_sum (high ..)) TOC addresses created during reload. */
8466 if (GET_CODE (op) == LO_SUM)
8467 op = XEXP (op, 1);
8470 const_rtx tocrel_base = op;
8471 const_rtx tocrel_offset = const0_rtx;
8473 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8475 tocrel_base = XEXP (op, 0);
8476 tocrel_offset = XEXP (op, 1);
8479 if (tocrel_base_ret)
8480 *tocrel_base_ret = tocrel_base;
8481 if (tocrel_offset_ret)
8482 *tocrel_offset_ret = tocrel_offset;
8484 return (GET_CODE (tocrel_base) == UNSPEC
8485 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
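/* For example, both a bare (unspec [(symbol_ref ...) ...] UNSPEC_TOCREL)
   and the offset form (plus (unspec ... UNSPEC_TOCREL) (const_int N))
   match, with the unspec returned as the base and N as the offset.  */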
8488 /* Return true if X is a constant pool address, and also for cmodel=medium
8489 if X is a toc-relative address known to be offsettable within MODE. */
8491 bool
8492 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8493 bool strict)
8495 const_rtx tocrel_base, tocrel_offset;
8496 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
8497 && (TARGET_CMODEL != CMODEL_MEDIUM
8498 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8499 || mode == QImode
8500 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8501 INTVAL (tocrel_offset), mode)));
8504 static bool
8505 legitimate_small_data_p (machine_mode mode, rtx x)
8507 return (DEFAULT_ABI == ABI_V4
8508 && !flag_pic && !TARGET_TOC
8509 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8510 && small_data_operand (x, mode));
8513 bool
8514 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8515 bool strict, bool worst_case)
8517 unsigned HOST_WIDE_INT offset;
8518 unsigned int extra;
8520 if (GET_CODE (x) != PLUS)
8521 return false;
8522 if (!REG_P (XEXP (x, 0)))
8523 return false;
8524 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8525 return false;
8526 if (mode_supports_vsx_dform_quad (mode))
8527 return quad_address_p (x, mode, strict);
8528 if (!reg_offset_addressing_ok_p (mode))
8529 return virtual_stack_registers_memory_p (x);
8530 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8531 return true;
8532 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8533 return false;
8535 offset = INTVAL (XEXP (x, 1));
8536 extra = 0;
8537 switch (mode)
8539 case E_V2SImode:
8540 case E_V2SFmode:
8541 /* Paired single modes: offset addressing isn't valid. */
8542 return false;
8544 case E_DFmode:
8545 case E_DDmode:
8546 case E_DImode:
8547 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8548 addressing. */
8549 if (VECTOR_MEM_VSX_P (mode))
8550 return false;
8552 if (!worst_case)
8553 break;
8554 if (!TARGET_POWERPC64)
8555 extra = 4;
8556 else if (offset & 3)
8557 return false;
8558 break;
8560 case E_TFmode:
8561 case E_IFmode:
8562 case E_KFmode:
8563 case E_TDmode:
8564 case E_TImode:
8565 case E_PTImode:
8566 extra = 8;
8567 if (!worst_case)
8568 break;
8569 if (!TARGET_POWERPC64)
8570 extra = 12;
8571 else if (offset & 3)
8572 return false;
8573 break;
8575 default:
8576 break;
8579 offset += 0x8000;
8580 return offset < 0x10000 - extra;
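/* Note on the final check above: adding 0x8000 turns the signed test
   -32768 <= offset <= 32767 - extra into a single unsigned comparison.
   For instance, a worst-case TFmode access on 64-bit (extra == 8)
   rejects offset 32760, since the second doubleword at offset + 8 would
   not fit in 16 bits, while 32752 is accepted.  */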
8583 bool
8584 legitimate_indexed_address_p (rtx x, int strict)
8586 rtx op0, op1;
8588 if (GET_CODE (x) != PLUS)
8589 return false;
8591 op0 = XEXP (x, 0);
8592 op1 = XEXP (x, 1);
8594 return (REG_P (op0) && REG_P (op1)
8595 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8596 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8597 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8598 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8601 bool
8602 avoiding_indexed_address_p (machine_mode mode)
8604 /* Avoid indexed addressing for modes that have non-indexed
8605 load/store instruction forms. */
8606 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8609 bool
8610 legitimate_indirect_address_p (rtx x, int strict)
8612 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8615 bool
8616 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8618 if (!TARGET_MACHO || !flag_pic
8619 || mode != SImode || GET_CODE (x) != MEM)
8620 return false;
8621 x = XEXP (x, 0);
8623 if (GET_CODE (x) != LO_SUM)
8624 return false;
8625 if (GET_CODE (XEXP (x, 0)) != REG)
8626 return false;
8627 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8628 return false;
8629 x = XEXP (x, 1);
8631 return CONSTANT_P (x);
8634 static bool
8635 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8637 if (GET_CODE (x) != LO_SUM)
8638 return false;
8639 if (GET_CODE (XEXP (x, 0)) != REG)
8640 return false;
8641 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8642 return false;
8643 /* Quad word addresses are restricted, and we can't use LO_SUM. */
8644 if (mode_supports_vsx_dform_quad (mode))
8645 return false;
8646 x = XEXP (x, 1);
8648 if (TARGET_ELF || TARGET_MACHO)
8650 bool large_toc_ok;
8652 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8653 return false;
8654 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, as that is usually
8655 called via push_reload from the reload pass code.
8656 LEGITIMIZE_RELOAD_ADDRESS recognizes some LO_SUM addresses as
8657 valid although this function says the opposite. In most cases
8658 LRA can generate correct code for address reloads through
8659 different transformations; only some LO_SUM cases can it not
8660 manage. So we need code here, analogous to that in
8661 rs6000_legitimize_reload_address for LO_SUM, saying that some addresses are still valid. */
8662 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8663 && small_toc_ref (x, VOIDmode));
8664 if (TARGET_TOC && ! large_toc_ok)
8665 return false;
8666 if (GET_MODE_NUNITS (mode) != 1)
8667 return false;
8668 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8669 && !(/* ??? Assume floating point reg based on mode? */
8670 TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
8671 && (mode == DFmode || mode == DDmode)))
8672 return false;
8674 return CONSTANT_P (x) || large_toc_ok;
8677 return false;
8681 /* Try machine-dependent ways of modifying an illegitimate address
8682 to be legitimate. If we find one, return the new, valid address.
8683 This is used from only one place: `memory_address' in explow.c.
8685 OLDX is the address as it was before break_out_memory_refs was
8686 called. In some cases it is useful to look at this to decide what
8687 needs to be done.
8689 It is always safe for this function to do nothing. It exists to
8690 recognize opportunities to optimize the output.
8692 On RS/6000, first check for the sum of a register with a constant
8693 integer that is out of range. If so, generate code to add the
8694 constant with the low-order 16 bits masked to the register and force
8695 this result into another register (this can be done with `cau').
8696 Then generate an address of REG+(CONST&0xffff), allowing for the
8697 possibility of bit 16 being a one.
8699 Then check for the sum of a register and something not constant, try to
8700 load the other things into a register and return the sum. */
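/* Worked example (illustrative): legitimizing (plus (reg R)
   (const_int 0x12345)) for SImode splits the constant into a high part
   0x10000 and a low part 0x2345.  The high part is added to R in a new
   register (a single addis, the `cau' above), and the returned address
   (plus (reg tmp) (const_int 0x2345)) fits the 16-bit D field.  */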
8702 static rtx
8703 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8704 machine_mode mode)
8706 unsigned int extra;
8708 if (!reg_offset_addressing_ok_p (mode)
8709 || mode_supports_vsx_dform_quad (mode))
8711 if (virtual_stack_registers_memory_p (x))
8712 return x;
8714 /* In theory we should not be seeing addresses of the form reg+0,
8715 but just in case it is generated, optimize it away. */
8716 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8717 return force_reg (Pmode, XEXP (x, 0));
8719 /* For TImode with load/store quad, restrict addresses to just a single
8720 pointer, so it works with both GPRs and VSX registers. */
8721 /* Make sure both operands are registers. */
8722 else if (GET_CODE (x) == PLUS
8723 && (mode != TImode || !TARGET_VSX))
8724 return gen_rtx_PLUS (Pmode,
8725 force_reg (Pmode, XEXP (x, 0)),
8726 force_reg (Pmode, XEXP (x, 1)));
8727 else
8728 return force_reg (Pmode, x);
8730 if (GET_CODE (x) == SYMBOL_REF)
8732 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8733 if (model != 0)
8734 return rs6000_legitimize_tls_address (x, model);
8737 extra = 0;
8738 switch (mode)
8740 case E_TFmode:
8741 case E_TDmode:
8742 case E_TImode:
8743 case E_PTImode:
8744 case E_IFmode:
8745 case E_KFmode:
8746 /* As in legitimate_offset_address_p we do not assume
8747 worst-case. The mode here is just a hint as to the registers
8748 used. A TImode is usually in gprs, but may actually be in
8749 fprs. Leave worst-case scenario for reload to handle via
8750 insn constraints. PTImode is only GPRs. */
8751 extra = 8;
8752 break;
8753 default:
8754 break;
8757 if (GET_CODE (x) == PLUS
8758 && GET_CODE (XEXP (x, 0)) == REG
8759 && GET_CODE (XEXP (x, 1)) == CONST_INT
8760 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8761 >= 0x10000 - extra)
8762 && !PAIRED_VECTOR_MODE (mode))
8764 HOST_WIDE_INT high_int, low_int;
8765 rtx sum;
8766 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8767 if (low_int >= 0x8000 - extra)
8768 low_int = 0;
8769 high_int = INTVAL (XEXP (x, 1)) - low_int;
8770 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8771 GEN_INT (high_int)), 0);
8772 return plus_constant (Pmode, sum, low_int);
8774 else if (GET_CODE (x) == PLUS
8775 && GET_CODE (XEXP (x, 0)) == REG
8776 && GET_CODE (XEXP (x, 1)) != CONST_INT
8777 && GET_MODE_NUNITS (mode) == 1
8778 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8779 || (/* ??? Assume floating point reg based on mode? */
8780 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8781 && (mode == DFmode || mode == DDmode)))
8782 && !avoiding_indexed_address_p (mode))
8784 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8785 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8787 else if (PAIRED_VECTOR_MODE (mode))
8789 if (mode == DImode)
8790 return x;
8791 /* We accept [reg + reg]. */
8793 if (GET_CODE (x) == PLUS)
8795 rtx op1 = XEXP (x, 0);
8796 rtx op2 = XEXP (x, 1);
8797 rtx y;
8799 op1 = force_reg (Pmode, op1);
8800 op2 = force_reg (Pmode, op2);
8802 /* We can't always do [reg + reg] for these, because [reg +
8803 reg + offset] is not a legitimate addressing mode. */
8804 y = gen_rtx_PLUS (Pmode, op1, op2);
8806 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
8807 return force_reg (Pmode, y);
8808 else
8809 return y;
8812 return force_reg (Pmode, x);
8814 else if ((TARGET_ELF
8815 #if TARGET_MACHO
8816 || !MACHO_DYNAMIC_NO_PIC_P
8817 #endif
8819 && TARGET_32BIT
8820 && TARGET_NO_TOC
8821 && ! flag_pic
8822 && GET_CODE (x) != CONST_INT
8823 && GET_CODE (x) != CONST_WIDE_INT
8824 && GET_CODE (x) != CONST_DOUBLE
8825 && CONSTANT_P (x)
8826 && GET_MODE_NUNITS (mode) == 1
8827 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8828 || (/* ??? Assume floating point reg based on mode? */
8829 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8830 && (mode == DFmode || mode == DDmode))))
8832 rtx reg = gen_reg_rtx (Pmode);
8833 if (TARGET_ELF)
8834 emit_insn (gen_elf_high (reg, x));
8835 else
8836 emit_insn (gen_macho_high (reg, x));
8837 return gen_rtx_LO_SUM (Pmode, reg, x);
8839 else if (TARGET_TOC
8840 && GET_CODE (x) == SYMBOL_REF
8841 && constant_pool_expr_p (x)
8842 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8843 return create_TOC_reference (x, NULL_RTX);
8844 else
8845 return x;
8848 /* Debug version of rs6000_legitimize_address. */
8849 static rtx
8850 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8852 rtx ret;
8853 rtx_insn *insns;
8855 start_sequence ();
8856 ret = rs6000_legitimize_address (x, oldx, mode);
8857 insns = get_insns ();
8858 end_sequence ();
8860 if (ret != x)
8862 fprintf (stderr,
8863 "\nrs6000_legitimize_address: mode %s, old code %s, "
8864 "new code %s, modified\n",
8865 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8866 GET_RTX_NAME (GET_CODE (ret)));
8868 fprintf (stderr, "Original address:\n");
8869 debug_rtx (x);
8871 fprintf (stderr, "oldx:\n");
8872 debug_rtx (oldx);
8874 fprintf (stderr, "New address:\n");
8875 debug_rtx (ret);
8877 if (insns)
8879 fprintf (stderr, "Insns added:\n");
8880 debug_rtx_list (insns, 20);
8883 else
8885 fprintf (stderr,
8886 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8887 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8889 debug_rtx (x);
8892 if (insns)
8893 emit_insn (insns);
8895 return ret;
8898 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8899 We need to emit DTP-relative relocations. */
8901 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8902 static void
8903 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8905 switch (size)
8907 case 4:
8908 fputs ("\t.long\t", file);
8909 break;
8910 case 8:
8911 fputs (DOUBLE_INT_ASM_OP, file);
8912 break;
8913 default:
8914 gcc_unreachable ();
8916 output_addr_const (file, x);
8917 if (TARGET_ELF)
8918 fputs ("@dtprel+0x8000", file);
8919 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8921 switch (SYMBOL_REF_TLS_MODEL (x))
8923 case 0:
8924 break;
8925 case TLS_MODEL_LOCAL_EXEC:
8926 fputs ("@le", file);
8927 break;
8928 case TLS_MODEL_INITIAL_EXEC:
8929 fputs ("@ie", file);
8930 break;
8931 case TLS_MODEL_GLOBAL_DYNAMIC:
8932 case TLS_MODEL_LOCAL_DYNAMIC:
8933 fputs ("@m", file);
8934 break;
8935 default:
8936 gcc_unreachable ();
8941 /* Return true if X is a symbol that refers to real (rather than emulated)
8942 TLS. */
8944 static bool
8945 rs6000_real_tls_symbol_ref_p (rtx x)
8947 return (GET_CODE (x) == SYMBOL_REF
8948 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8951 /* In the name of slightly smaller debug output, and to cater to
8952 general assembler lossage, recognize various UNSPEC sequences
8953 and turn them back into a direct symbol reference. */
8955 static rtx
8956 rs6000_delegitimize_address (rtx orig_x)
8958 rtx x, y, offset;
8960 orig_x = delegitimize_mem_from_attrs (orig_x);
8961 x = orig_x;
8962 if (MEM_P (x))
8963 x = XEXP (x, 0);
8965 y = x;
8966 if (TARGET_CMODEL != CMODEL_SMALL
8967 && GET_CODE (y) == LO_SUM)
8968 y = XEXP (y, 1);
8970 offset = NULL_RTX;
8971 if (GET_CODE (y) == PLUS
8972 && GET_MODE (y) == Pmode
8973 && CONST_INT_P (XEXP (y, 1)))
8975 offset = XEXP (y, 1);
8976 y = XEXP (y, 0);
8979 if (GET_CODE (y) == UNSPEC
8980 && XINT (y, 1) == UNSPEC_TOCREL)
8982 y = XVECEXP (y, 0, 0);
8984 #ifdef HAVE_AS_TLS
8985 /* Do not associate thread-local symbols with the original
8986 constant pool symbol. */
8987 if (TARGET_XCOFF
8988 && GET_CODE (y) == SYMBOL_REF
8989 && CONSTANT_POOL_ADDRESS_P (y)
8990 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8991 return orig_x;
8992 #endif
8994 if (offset != NULL_RTX)
8995 y = gen_rtx_PLUS (Pmode, y, offset);
8996 if (!MEM_P (orig_x))
8997 return y;
8998 else
8999 return replace_equiv_address_nv (orig_x, y);
9002 if (TARGET_MACHO
9003 && GET_CODE (orig_x) == LO_SUM
9004 && GET_CODE (XEXP (orig_x, 1)) == CONST)
9006 y = XEXP (XEXP (orig_x, 1), 0);
9007 if (GET_CODE (y) == UNSPEC
9008 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
9009 return XVECEXP (y, 0, 0);
9012 return orig_x;
9015 /* Return true if X shouldn't be emitted into the debug info.
9016 The linker doesn't like .toc section references from
9017 .debug_* sections, so reject .toc section symbols. */
9019 static bool
9020 rs6000_const_not_ok_for_debug_p (rtx x)
9022 if (GET_CODE (x) == UNSPEC)
9023 return true;
9024 if (GET_CODE (x) == SYMBOL_REF
9025 && CONSTANT_POOL_ADDRESS_P (x))
9027 rtx c = get_pool_constant (x);
9028 machine_mode cmode = get_pool_mode (x);
9029 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
9030 return true;
9033 return false;
9037 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
9039 static bool
9040 rs6000_legitimate_combined_insn (rtx_insn *insn)
9042 int icode = INSN_CODE (insn);
9044 /* Reject creating doloop insns. Combine should not be allowed
9045 to create these for a number of reasons:
9046 1) In a nested loop, if combine creates one of these in an
9047 outer loop and the register allocator happens to allocate ctr
9048 to the outer loop insn, then the inner loop can't use ctr.
9049 Inner loops ought to be more highly optimized.
9050 2) Combine often wants to create one of these from what was
9051 originally a three insn sequence, first combining the three
9052 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
9053 allocated ctr, the splitter takes us back to the three insn
9054 sequence. It's better to stop combine at the two insn
9055 sequence.
9056 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
9057 insns, the register allocator sometimes uses floating point
9058 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9059 jump insn and output reloads are not implemented for jumps,
9060 the ctrsi/ctrdi splitters need to handle all possible cases.
9061 That's a pain, and it gets to be seriously difficult when a
9062 splitter that runs after reload needs memory to transfer from
9063 a gpr to fpr. See PR70098 and PR71763 which are not fixed
9064 for the difficult case. It's better to not create problems
9065 in the first place. */
9066 if (icode != CODE_FOR_nothing
9067 && (icode == CODE_FOR_ctrsi_internal1
9068 || icode == CODE_FOR_ctrdi_internal1
9069 || icode == CODE_FOR_ctrsi_internal2
9070 || icode == CODE_FOR_ctrdi_internal2))
9071 return false;
9073 return true;
9076 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9078 static GTY(()) rtx rs6000_tls_symbol;
9079 static rtx
9080 rs6000_tls_get_addr (void)
9082 if (!rs6000_tls_symbol)
9083 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
9085 return rs6000_tls_symbol;
9088 /* Construct the SYMBOL_REF for TLS GOT references. */
9090 static GTY(()) rtx rs6000_got_symbol;
9091 static rtx
9092 rs6000_got_sym (void)
9094 if (!rs6000_got_symbol)
9096 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9097 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
9098 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
9101 return rs6000_got_symbol;
9104 /* AIX Thread-Local Address support. */
9106 static rtx
9107 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
9109 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
9110 const char *name;
9111 char *tlsname;
9113 name = XSTR (addr, 0);
9114 /* Append TLS CSECT qualifier, unless the symbol already is qualified
9115 or the symbol will be in TLS private data section. */
9116 if (name[strlen (name) - 1] != ']'
9117 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
9118 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
9120 tlsname = XALLOCAVEC (char, strlen (name) + 4);
9121 strcpy (tlsname, name);
9122 strcat (tlsname,
9123 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
9124 tlsaddr = copy_rtx (addr);
9125 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
9127 else
9128 tlsaddr = addr;
9130 /* Place addr into TOC constant pool. */
9131 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
9133 /* Output the TOC entry and create the MEM referencing the value. */
9134 if (constant_pool_expr_p (XEXP (sym, 0))
9135 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
9137 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
9138 mem = gen_const_mem (Pmode, tocref);
9139 set_mem_alias_set (mem, get_TOC_alias_set ());
9141 else
9142 return sym;
9144 /* Use global-dynamic for local-dynamic. */
9145 if (model == TLS_MODEL_GLOBAL_DYNAMIC
9146 || model == TLS_MODEL_LOCAL_DYNAMIC)
9148 /* Create new TOC reference for @m symbol. */
9149 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
9150 tlsname = XALLOCAVEC (char, strlen (name) + 1);
9151 strcpy (tlsname, "*LCM");
9152 strcat (tlsname, name + 3);
9153 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
9154 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
9155 tocref = create_TOC_reference (modaddr, NULL_RTX);
9156 rtx modmem = gen_const_mem (Pmode, tocref);
9157 set_mem_alias_set (modmem, get_TOC_alias_set ());
9159 rtx modreg = gen_reg_rtx (Pmode);
9160 emit_insn (gen_rtx_SET (modreg, modmem));
9162 tmpreg = gen_reg_rtx (Pmode);
9163 emit_insn (gen_rtx_SET (tmpreg, mem));
9165 dest = gen_reg_rtx (Pmode);
9166 if (TARGET_32BIT)
9167 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
9168 else
9169 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
9170 return dest;
9172 /* Obtain the TLS pointer: with a call on 32-bit, from GPR 13 on 64-bit. */
9173 else if (TARGET_32BIT)
9175 tlsreg = gen_reg_rtx (SImode);
9176 emit_insn (gen_tls_get_tpointer (tlsreg));
9178 else
9179 tlsreg = gen_rtx_REG (DImode, 13);
9181 /* Load the TOC value into temporary register. */
9182 tmpreg = gen_reg_rtx (Pmode);
9183 emit_insn (gen_rtx_SET (tmpreg, mem));
9184 set_unique_reg_note (get_last_insn (), REG_EQUAL,
9185 gen_rtx_MINUS (Pmode, addr, tlsreg));
9187 /* Add TOC symbol value to TLS pointer. */
9188 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
9190 return dest;
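/* Illustrative sketch (register numbers and TOC entry name arbitrary):
   on 64-bit AIX the code above typically amounts to

       ld  9,<TOC entry for addr>(2)   # TLS offset from the TOC
       add 9,9,13                      # add the TLS pointer in GPR 13

   matching the REG_EQUAL note (addr - tlsreg) set just above; 32-bit
   obtains the TLS pointer with a __get_tpointer call instead.  */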
9193 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9194 this (thread-local) address. */
9196 static rtx
9197 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
9199 rtx dest, insn;
9201 if (TARGET_XCOFF)
9202 return rs6000_legitimize_tls_address_aix (addr, model);
9204 dest = gen_reg_rtx (Pmode);
9205 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
9207 rtx tlsreg;
9209 if (TARGET_64BIT)
9211 tlsreg = gen_rtx_REG (Pmode, 13);
9212 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
9214 else
9216 tlsreg = gen_rtx_REG (Pmode, 2);
9217 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
9219 emit_insn (insn);
9221 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
9223 rtx tlsreg, tmp;
9225 tmp = gen_reg_rtx (Pmode);
9226 if (TARGET_64BIT)
9228 tlsreg = gen_rtx_REG (Pmode, 13);
9229 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
9231 else
9233 tlsreg = gen_rtx_REG (Pmode, 2);
9234 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
9236 emit_insn (insn);
9237 if (TARGET_64BIT)
9238 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
9239 else
9240 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
9241 emit_insn (insn);
9243 else
9245 rtx r3, got, tga, tmp1, tmp2, call_insn;
9247 /* We currently use relocations like @got@tlsgd for tls, which
9248 means the linker will handle allocation of tls entries, placing
9249 them in the .got section. So use a pointer to the .got section,
9250 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9251 or to secondary GOT sections used by 32-bit -fPIC. */
9252 if (TARGET_64BIT)
9253 got = gen_rtx_REG (Pmode, 2);
9254 else
9256 if (flag_pic == 1)
9257 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
9258 else
9260 rtx gsym = rs6000_got_sym ();
9261 got = gen_reg_rtx (Pmode);
9262 if (flag_pic == 0)
9263 rs6000_emit_move (got, gsym, Pmode);
9264 else
9266 rtx mem, lab;
9268 tmp1 = gen_reg_rtx (Pmode);
9269 tmp2 = gen_reg_rtx (Pmode);
9270 mem = gen_const_mem (Pmode, tmp1);
9271 lab = gen_label_rtx ();
9272 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9273 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9274 if (TARGET_LINK_STACK)
9275 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9276 emit_move_insn (tmp2, mem);
9277 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9278 set_unique_reg_note (last, REG_EQUAL, gsym);
9283 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9285 tga = rs6000_tls_get_addr ();
9286 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9287 const0_rtx, Pmode);
9289 r3 = gen_rtx_REG (Pmode, 3);
9290 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9292 if (TARGET_64BIT)
9293 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9294 else
9295 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9297 else if (DEFAULT_ABI == ABI_V4)
9298 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9299 else
9300 gcc_unreachable ();
9301 call_insn = last_call_insn ();
9302 PATTERN (call_insn) = insn;
9303 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9304 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9305 pic_offset_table_rtx);
9307 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9309 tga = rs6000_tls_get_addr ();
9310 tmp1 = gen_reg_rtx (Pmode);
9311 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9312 const0_rtx, Pmode);
9314 r3 = gen_rtx_REG (Pmode, 3);
9315 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9317 if (TARGET_64BIT)
9318 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9319 else
9320 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9322 else if (DEFAULT_ABI == ABI_V4)
9323 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9324 else
9325 gcc_unreachable ();
9326 call_insn = last_call_insn ();
9327 PATTERN (call_insn) = insn;
9328 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9329 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9330 pic_offset_table_rtx);
9332 if (rs6000_tls_size == 16)
9334 if (TARGET_64BIT)
9335 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9336 else
9337 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9339 else if (rs6000_tls_size == 32)
9341 tmp2 = gen_reg_rtx (Pmode);
9342 if (TARGET_64BIT)
9343 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9344 else
9345 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9346 emit_insn (insn);
9347 if (TARGET_64BIT)
9348 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9349 else
9350 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9352 else
9354 tmp2 = gen_reg_rtx (Pmode);
9355 if (TARGET_64BIT)
9356 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9357 else
9358 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9359 emit_insn (insn);
9360 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9362 emit_insn (insn);
9364 else
9366 /* IE, or 64-bit offset LE. */
9367 tmp2 = gen_reg_rtx (Pmode);
9368 if (TARGET_64BIT)
9369 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9370 else
9371 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9372 emit_insn (insn);
9373 if (TARGET_64BIT)
9374 insn = gen_tls_tls_64 (dest, tmp2, addr);
9375 else
9376 insn = gen_tls_tls_32 (dest, tmp2, addr);
9377 emit_insn (insn);
9381 return dest;
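/* For reference, hedged sketches of the 64-bit ELF sequences generated
   above (register numbers arbitrary):

     global-dynamic:  addi 3,2,sym@got@tlsgd
                      bl __tls_get_addr(sym@tlsgd)
                      nop
     local-exec, -mtls-size=16:
                      addi 9,13,sym@tprel
     initial-exec:    ld 9,sym@got@tprel(2)
                      add 9,9,sym@tls(13)  */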
9384 /* Only create the global variable for the stack protect guard if we are using
9385 the global flavor of that guard. */
9386 static tree
9387 rs6000_init_stack_protect_guard (void)
9389 if (rs6000_stack_protector_guard == SSP_GLOBAL)
9390 return default_stack_protect_guard ();
9392 return NULL_TREE;
9395 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9397 static bool
9398 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9400 if (GET_CODE (x) == HIGH
9401 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9402 return true;
9404 /* A TLS symbol in the TOC cannot contain a sum. */
9405 if (GET_CODE (x) == CONST
9406 && GET_CODE (XEXP (x, 0)) == PLUS
9407 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9408 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9409 return true;
9411 /* Do not place an ELF TLS symbol in the constant pool. */
9412 return TARGET_ELF && tls_referenced_p (x);
9415 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9416 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9417 can be addressed relative to the toc pointer. */
9419 static bool
9420 use_toc_relative_ref (rtx sym, machine_mode mode)
9422 return ((constant_pool_expr_p (sym)
9423 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9424 get_pool_mode (sym)))
9425 || (TARGET_CMODEL == CMODEL_MEDIUM
9426 && SYMBOL_REF_LOCAL_P (sym)
9427 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
9430 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9431 replace the input X, or the original X if no replacement is called for.
9432 The output parameter *WIN is 1 if the calling macro should goto WIN,
9433 0 if it should not.
9435 For RS/6000, we wish to handle large displacements off a base
9436 register by splitting the addend across an addis and the mem insn.
9437 This cuts the number of extra insns needed from 3 to 1.
9439 On Darwin, we use this to generate code for floating point constants.
9440 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9441 The Darwin code is inside #if TARGET_MACHO because only then are the
9442 machopic_* functions defined. */
9443 static rtx
9444 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9445 int opnum, int type,
9446 int ind_levels ATTRIBUTE_UNUSED, int *win)
9448 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9449 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9451 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9452 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9453 if (reg_offset_p
9454 && opnum == 1
9455 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9456 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9457 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9458 && TARGET_P9_VECTOR)
9459 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9460 && TARGET_P9_VECTOR)))
9461 reg_offset_p = false;
9463 /* We must recognize output that we have already generated ourselves. */
9464 if (GET_CODE (x) == PLUS
9465 && GET_CODE (XEXP (x, 0)) == PLUS
9466 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9467 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9468 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9470 if (TARGET_DEBUG_ADDR)
9472 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9473 debug_rtx (x);
9475 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9476 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9477 opnum, (enum reload_type) type);
9478 *win = 1;
9479 return x;
9482 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9483 if (GET_CODE (x) == LO_SUM
9484 && GET_CODE (XEXP (x, 0)) == HIGH)
9486 if (TARGET_DEBUG_ADDR)
9488 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9489 debug_rtx (x);
9491 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9492 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9493 opnum, (enum reload_type) type);
9494 *win = 1;
9495 return x;
9498 #if TARGET_MACHO
9499 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9500 && GET_CODE (x) == LO_SUM
9501 && GET_CODE (XEXP (x, 0)) == PLUS
9502 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9503 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9504 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9505 && machopic_operand_p (XEXP (x, 1)))
9507 /* Result of a previous invocation of this function on a Darwin
9508 floating-point constant. */
9509 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9510 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9511 opnum, (enum reload_type) type);
9512 *win = 1;
9513 return x;
9515 #endif
9517 if (TARGET_CMODEL != CMODEL_SMALL
9518 && reg_offset_p
9519 && !quad_offset_p
9520 && small_toc_ref (x, VOIDmode))
9522 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9523 x = gen_rtx_LO_SUM (Pmode, hi, x);
9524 if (TARGET_DEBUG_ADDR)
9526 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9527 debug_rtx (x);
9529 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9530 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9531 opnum, (enum reload_type) type);
9532 *win = 1;
9533 return x;
9536 if (GET_CODE (x) == PLUS
9537 && REG_P (XEXP (x, 0))
9538 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9539 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9540 && CONST_INT_P (XEXP (x, 1))
9541 && reg_offset_p
9542 && !PAIRED_VECTOR_MODE (mode)
9543 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9545 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9546 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9547 HOST_WIDE_INT high
9548 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9550 /* Check for 32-bit overflow or quad addresses with one of the
9551 four least significant bits set. */
9552 if (high + low != val
9553 || (quad_offset_p && (low & 0xf)))
9555 *win = 0;
9556 return x;
9559 /* Reload the high part into a base reg; leave the low part
9560 in the mem directly. */
9562 x = gen_rtx_PLUS (GET_MODE (x),
9563 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9564 GEN_INT (high)),
9565 GEN_INT (low));
9567 if (TARGET_DEBUG_ADDR)
9569 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9570 debug_rtx (x);
9572 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9573 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9574 opnum, (enum reload_type) type);
9575 *win = 1;
9576 return x;
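/* Worked example (illustrative): for val = 0x12345 the code above
   computes low = 0x2345 and high = 0x10000, turning the address into
   (plus (plus (reg) (const_int 0x10000)) (const_int 0x2345)).
   push_reload then materializes the inner PLUS in a base register
   with a single addis, while the low part remains the 16-bit d-form
   displacement of the memory access.  */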
9579 if (GET_CODE (x) == SYMBOL_REF
9580 && reg_offset_p
9581 && !quad_offset_p
9582 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9583 && !PAIRED_VECTOR_MODE (mode)
9584 #if TARGET_MACHO
9585 && DEFAULT_ABI == ABI_DARWIN
9586 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9587 && machopic_symbol_defined_p (x)
9588 #else
9589 && DEFAULT_ABI == ABI_V4
9590 && !flag_pic
9591 #endif
9592 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9593 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9594 without fprs.
9595 ??? Assume floating point reg based on mode? This assumption is
9596 violated by e.g. a powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9597 where reload ends up doing a DFmode load of a constant from
9598 mem using two gprs. Unfortunately, at this point reload
9599 hasn't yet selected regs so poking around in reload data
9600 won't help and even if we could figure out the regs reliably,
9601 we'd still want to allow this transformation when the mem is
9602 naturally aligned. Since we say the address is good here, we
9603 can't disable offsets from LO_SUMs in mem_operand_gpr.
9604 FIXME: Allow offset from lo_sum for other modes too, when
9605 mem is sufficiently aligned.
9607 Also disallow this if the type can go in VMX/Altivec registers, since
9608 those registers do not have d-form (reg+offset) address modes. */
9609 && !reg_addr[mode].scalar_in_vmx_p
9610 && mode != TFmode
9611 && mode != TDmode
9612 && mode != IFmode
9613 && mode != KFmode
9614 && (mode != TImode || !TARGET_VSX)
9615 && mode != PTImode
9616 && (mode != DImode || TARGET_POWERPC64)
9617 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9618 || (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)))
9620 #if TARGET_MACHO
9621 if (flag_pic)
9623 rtx offset = machopic_gen_offset (x);
9624 x = gen_rtx_LO_SUM (GET_MODE (x),
9625 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9626 gen_rtx_HIGH (Pmode, offset)), offset);
9628 else
9629 #endif
9630 x = gen_rtx_LO_SUM (GET_MODE (x),
9631 gen_rtx_HIGH (Pmode, x), x);
9633 if (TARGET_DEBUG_ADDR)
9635 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9636 debug_rtx (x);
9638 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9639 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9640 opnum, (enum reload_type) type);
9641 *win = 1;
9642 return x;
9645 /* Reload an offset address wrapped by an AND that represents the
9646 masking of the lower bits. Strip the outer AND and let reload
9647 convert the offset address into an indirect address. For VSX,
9648 force reload to create the address with an AND in a separate
9649 register, because we can't guarantee an altivec register will
9650 be used. */
9651 if (VECTOR_MEM_ALTIVEC_P (mode)
9652 && GET_CODE (x) == AND
9653 && GET_CODE (XEXP (x, 0)) == PLUS
9654 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9655 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9656 && GET_CODE (XEXP (x, 1)) == CONST_INT
9657 && INTVAL (XEXP (x, 1)) == -16)
9659 x = XEXP (x, 0);
9660 *win = 1;
9661 return x;
9664 if (TARGET_TOC
9665 && reg_offset_p
9666 && !quad_offset_p
9667 && GET_CODE (x) == SYMBOL_REF
9668 && use_toc_relative_ref (x, mode))
9670 x = create_TOC_reference (x, NULL_RTX);
9671 if (TARGET_CMODEL != CMODEL_SMALL)
9673 if (TARGET_DEBUG_ADDR)
9675 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9676 debug_rtx (x);
9678 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9679 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9680 opnum, (enum reload_type) type);
9682 *win = 1;
9683 return x;
9685 *win = 0;
9686 return x;
9689 /* Debug version of rs6000_legitimize_reload_address. */
9690 static rtx
9691 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9692 int opnum, int type,
9693 int ind_levels, int *win)
9695 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9696 ind_levels, win);
9697 fprintf (stderr,
9698 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9699 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9700 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9701 debug_rtx (x);
9703 if (x == ret)
9704 fprintf (stderr, "Same address returned\n");
9705 else if (!ret)
9706 fprintf (stderr, "NULL returned\n");
9707 else
9709 fprintf (stderr, "New address:\n");
9710 debug_rtx (ret);
9713 return ret;
9716 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9717 that is a valid memory address for an instruction.
9718 The MODE argument is the machine mode for the MEM expression
9719 that wants to use this address.
9721 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9722 refers to a constant pool entry of an address (or the sum of it
9723 plus a constant), a short (16-bit signed) constant plus a register,
9724 the sum of two registers, or a register indirect, possibly with an
9725 auto-increment. For DFmode, DDmode and DImode with a constant plus
9726 register, we must ensure that both words are addressable, or that we
9727 are on PowerPC64 with the offset word-aligned.
9729 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9730 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9731 because adjacent memory cells are accessed by adding word-sized offsets
9732 during assembly output. */
9733 static bool
9734 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9736 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9737 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9739 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9740 if (VECTOR_MEM_ALTIVEC_P (mode)
9741 && GET_CODE (x) == AND
9742 && GET_CODE (XEXP (x, 1)) == CONST_INT
9743 && INTVAL (XEXP (x, 1)) == -16)
9744 x = XEXP (x, 0);
9746 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9747 return 0;
9748 if (legitimate_indirect_address_p (x, reg_ok_strict))
9749 return 1;
9750 if (TARGET_UPDATE
9751 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9752 && mode_supports_pre_incdec_p (mode)
9753 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9754 return 1;
9755 /* Handle restricted vector d-form offsets in ISA 3.0. */
9756 if (quad_offset_p)
9758 if (quad_address_p (x, mode, reg_ok_strict))
9759 return 1;
9761 else if (virtual_stack_registers_memory_p (x))
9762 return 1;
9764 else if (reg_offset_p)
9766 if (legitimate_small_data_p (mode, x))
9767 return 1;
9768 if (legitimate_constant_pool_address_p (x, mode,
9769 reg_ok_strict || lra_in_progress))
9770 return 1;
9771 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
9772 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
9773 return 1;
9776 /* For TImode, if we have TImode in VSX registers, only allow register
9777 indirect addresses. This will allow the values to go in either GPRs
9778 or VSX registers without reloading. The vector types would tend to
9779 go into VSX registers, so we allow REG+REG, while TImode seems
9780 somewhat split, in that some uses are GPR based, and some VSX based. */
9781 /* FIXME: We could loosen this by changing the following to
9782 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9783 but currently we cannot allow REG+REG addressing for TImode. See
9784 PR72827 for complete details on how this ends up hoodwinking DSE. */
9785 if (mode == TImode && TARGET_VSX)
9786 return 0;
9787 /* If not REG_OK_STRICT (before reload), let any stack offset pass. */
9788 if (! reg_ok_strict
9789 && reg_offset_p
9790 && GET_CODE (x) == PLUS
9791 && GET_CODE (XEXP (x, 0)) == REG
9792 && (XEXP (x, 0) == virtual_stack_vars_rtx
9793 || XEXP (x, 0) == arg_pointer_rtx)
9794 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9795 return 1;
9796 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9797 return 1;
9798 if (!FLOAT128_2REG_P (mode)
9799 && ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9800 || TARGET_POWERPC64
9801 || (mode != DFmode && mode != DDmode))
9802 && (TARGET_POWERPC64 || mode != DImode)
9803 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9804 && mode != PTImode
9805 && !avoiding_indexed_address_p (mode)
9806 && legitimate_indexed_address_p (x, reg_ok_strict))
9807 return 1;
9808 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9809 && mode_supports_pre_modify_p (mode)
9810 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9811 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9812 reg_ok_strict, false)
9813 || (!avoiding_indexed_address_p (mode)
9814 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9815 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9816 return 1;
9817 if (reg_offset_p && !quad_offset_p
9818 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9819 return 1;
9820 return 0;
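/* Some concrete shapes of addresses accepted above, assuming the
   relevant target flags permit them (illustrative only):

     (reg:DI 9)                           register indirect
     (pre_inc:DI (reg:DI 9))              auto-increment
     (plus:DI (reg:DI 9) (const_int 16))  16-bit offset (d-form)
     (plus:DI (reg:DI 9) (reg:DI 10))     indexed (x-form)
     (lo_sum:DI (reg:DI 9) (symbol_ref))  lo_sum, e.g. medium model  */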
9823 /* Debug version of rs6000_legitimate_address_p. */
9824 static bool
9825 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9826 bool reg_ok_strict)
9828 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9829 fprintf (stderr,
9830 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9831 "strict = %d, reload = %s, code = %s\n",
9832 ret ? "true" : "false",
9833 GET_MODE_NAME (mode),
9834 reg_ok_strict,
9835 (reload_completed ? "after" : "before"),
9836 GET_RTX_NAME (GET_CODE (x)));
9837 debug_rtx (x);
9839 return ret;
9842 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9844 static bool
9845 rs6000_mode_dependent_address_p (const_rtx addr,
9846 addr_space_t as ATTRIBUTE_UNUSED)
9848 return rs6000_mode_dependent_address_ptr (addr);
9851 /* Go to LABEL if ADDR (a legitimate address expression)
9852 has an effect that depends on the machine mode it is used for.
9854 On the RS/6000 this is true of all integral offsets (since AltiVec
9855 and VSX modes don't allow them) or is a pre-increment or decrement.
9857 ??? Except that due to conceptual problems in offsettable_address_p
9858 we can't really report the problems of integral offsets. So leave
9859 this assuming that the adjustable offset must be valid for the
9860 sub-words of a TFmode operand, which is what we had before. */
9862 static bool
9863 rs6000_mode_dependent_address (const_rtx addr)
9865 switch (GET_CODE (addr))
9867 case PLUS:
9868 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9869 is considered a legitimate address before reload, so there
9870 are no offset restrictions in that case. Note that this
9871 condition is safe in strict mode because any address involving
9872 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9873 been rejected as illegitimate. */
9874 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9875 && XEXP (addr, 0) != arg_pointer_rtx
9876 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9878 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9879 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9881 break;
9883 case LO_SUM:
9884 /* Anything in the constant pool is sufficiently aligned that
9885 all bytes have the same high part address. */
9886 return !legitimate_constant_pool_address_p (addr, QImode, false);
9888 /* Auto-increment cases are now treated generically in recog.c. */
9889 case PRE_MODIFY:
9890 return TARGET_UPDATE;
9892 /* AND is only allowed in Altivec loads. */
9893 case AND:
9894 return true;
9896 default:
9897 break;
9900 return false;
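/* Worked example (illustrative): on 32-bit, an offset of 32760 is
   mode-dependent because a TFmode access also touches offsets up to
   32760 + 12 = 32772, which no longer fits in a signed 16-bit
   displacement; hence the "0x10000 - 12" slack in the check above.  */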
9903 /* Debug version of rs6000_mode_dependent_address. */
9904 static bool
9905 rs6000_debug_mode_dependent_address (const_rtx addr)
9907 bool ret = rs6000_mode_dependent_address (addr);
9909 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9910 ret ? "true" : "false");
9911 debug_rtx (addr);
9913 return ret;
9916 /* Implement FIND_BASE_TERM. */
9919 rs6000_find_base_term (rtx op)
9921 rtx base;
9923 base = op;
9924 if (GET_CODE (base) == CONST)
9925 base = XEXP (base, 0);
9926 if (GET_CODE (base) == PLUS)
9927 base = XEXP (base, 0);
9928 if (GET_CODE (base) == UNSPEC)
9929 switch (XINT (base, 1))
9931 case UNSPEC_TOCREL:
9932 case UNSPEC_MACHOPIC_OFFSET:
9933 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9934 for aliasing purposes. */
9935 return XVECEXP (base, 0, 0);
9938 return op;
9941 /* More elaborate version of recog's offsettable_memref_p predicate
9942 that works around the ??? note of rs6000_mode_dependent_address.
9943 In particular it accepts
9945 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9947 in 32-bit mode, which the recog predicate rejects. */
9949 static bool
9950 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
9952 bool worst_case;
9954 if (!MEM_P (op))
9955 return false;
9957 /* First mimic offsettable_memref_p. */
9958 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
9959 return true;
9961 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9962 the latter predicate knows nothing about the mode of the memory
9963 reference and, therefore, assumes that it is the largest supported
9964 mode (TFmode). As a consequence, legitimate offsettable memory
9965 references are rejected. rs6000_legitimate_offset_address_p contains
9966 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9967 at least with a little bit of help here given that we know the
9968 actual registers used. */
9969 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9970 || GET_MODE_SIZE (reg_mode) == 4);
9971 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9972 true, worst_case);
9975 /* Determine the reassociation width to be used in reassociate_bb.
9976 This takes into account how many parallel operations we
9977 can actually do of a given type, and also the latency.
9979 int add/sub 6/cycle
9980 mul 2/cycle
9981 vect add/sub/mul 2/cycle
9982 fp add/sub/mul 2/cycle
9983 dfp 1/cycle
9986 static int
9987 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9988 machine_mode mode)
9990 switch (rs6000_cpu)
9992 case PROCESSOR_POWER8:
9993 case PROCESSOR_POWER9:
9994 if (DECIMAL_FLOAT_MODE_P (mode))
9995 return 1;
9996 if (VECTOR_MODE_P (mode))
9997 return 4;
9998 if (INTEGRAL_MODE_P (mode))
9999 return opc == MULT_EXPR ? 4 : 6;
10000 if (FLOAT_MODE_P (mode))
10001 return 4;
10002 break;
10003 default:
10004 break;
10006 return 1;
10009 /* Change register usage conditional on target flags. */
10010 static void
10011 rs6000_conditional_register_usage (void)
10013 int i;
10015 if (TARGET_DEBUG_TARGET)
10016 fprintf (stderr, "rs6000_conditional_register_usage called\n");
10018 /* Set MQ register fixed (already call_used) so that it will not be
10019 allocated. */
10020 fixed_regs[64] = 1;
10022 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
10023 if (TARGET_64BIT)
10024 fixed_regs[13] = call_used_regs[13]
10025 = call_really_used_regs[13] = 1;
10027 /* Conditionally disable FPRs. */
10028 if (TARGET_SOFT_FLOAT)
10029 for (i = 32; i < 64; i++)
10030 fixed_regs[i] = call_used_regs[i]
10031 = call_really_used_regs[i] = 1;
10033 /* The TOC register is not killed across calls in a way that is
10034 visible to the compiler. */
10035 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10036 call_really_used_regs[2] = 0;
10038 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
10039 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10041 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
10042 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10043 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10044 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10046 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
10047 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10048 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10049 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10051 if (TARGET_TOC && TARGET_MINIMAL_TOC)
10052 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10053 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10055 if (!TARGET_ALTIVEC && !TARGET_VSX)
10057 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
10058 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10059 call_really_used_regs[VRSAVE_REGNO] = 1;
10062 if (TARGET_ALTIVEC || TARGET_VSX)
10063 global_regs[VSCR_REGNO] = 1;
10065 if (TARGET_ALTIVEC_ABI)
10067 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
10068 call_used_regs[i] = call_really_used_regs[i] = 1;
10070 /* AIX reserves VR20:31 in non-extended ABI mode. */
10071 if (TARGET_XCOFF)
10072 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
10073 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10078 /* Output insns to set DEST equal to the constant SOURCE as a series of
10079 lis, ori and shl instructions and return TRUE. */
10081 bool
10082 rs6000_emit_set_const (rtx dest, rtx source)
10084 machine_mode mode = GET_MODE (dest);
10085 rtx temp, set;
10086 rtx_insn *insn;
10087 HOST_WIDE_INT c;
10089 gcc_checking_assert (CONST_INT_P (source));
10090 c = INTVAL (source);
10091 switch (mode)
10093 case E_QImode:
10094 case E_HImode:
10095 emit_insn (gen_rtx_SET (dest, source));
10096 return true;
10098 case E_SImode:
10099 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
10101 emit_insn (gen_rtx_SET (copy_rtx (temp),
10102 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
10103 emit_insn (gen_rtx_SET (dest,
10104 gen_rtx_IOR (SImode, copy_rtx (temp),
10105 GEN_INT (c & 0xffff))));
10106 break;
10108 case E_DImode:
10109 if (!TARGET_POWERPC64)
10111 rtx hi, lo;
10113 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
10114 DImode);
10115 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
10116 DImode);
10117 emit_move_insn (hi, GEN_INT (c >> 32));
10118 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
10119 emit_move_insn (lo, GEN_INT (c));
10121 else
10122 rs6000_emit_set_long_const (dest, c);
10123 break;
10125 default:
10126 gcc_unreachable ();
10129 insn = get_last_insn ();
10130 set = single_set (insn);
10131 if (! CONSTANT_P (SET_SRC (set)))
10132 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
10134 return true;
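/* Example (illustrative): for SImode c = 0x12345678 this emits

       lis 9,0x1234      # temp = c & ~0xffff
       ori 9,9,0x5678    # dest = temp | (c & 0xffff)

   and the REG_EQUAL note on the last insn records the full constant.  */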
10137 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
10138 Output insns to set DEST equal to the constant C as a series of
10139 lis, ori and shl instructions. */
10141 static void
10142 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
10144 rtx temp;
10145 HOST_WIDE_INT ud1, ud2, ud3, ud4;
10147 ud1 = c & 0xffff;
10148 c = c >> 16;
10149 ud2 = c & 0xffff;
10150 c = c >> 16;
10151 ud3 = c & 0xffff;
10152 c = c >> 16;
10153 ud4 = c & 0xffff;
10155 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
10156 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
10157 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
10159 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
10160 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
10162 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10164 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10165 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10166 if (ud1 != 0)
10167 emit_move_insn (dest,
10168 gen_rtx_IOR (DImode, copy_rtx (temp),
10169 GEN_INT (ud1)));
10171 else if (ud3 == 0 && ud4 == 0)
10173 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10175 gcc_assert (ud2 & 0x8000);
10176 emit_move_insn (copy_rtx (temp),
10177 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10178 if (ud1 != 0)
10179 emit_move_insn (copy_rtx (temp),
10180 gen_rtx_IOR (DImode, copy_rtx (temp),
10181 GEN_INT (ud1)));
10182 emit_move_insn (dest,
10183 gen_rtx_ZERO_EXTEND (DImode,
10184 gen_lowpart (SImode,
10185 copy_rtx (temp))));
10187 else if ((ud4 == 0xffff && (ud3 & 0x8000))
10188 || (ud4 == 0 && ! (ud3 & 0x8000)))
10190 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10192 emit_move_insn (copy_rtx (temp),
10193 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
10194 if (ud2 != 0)
10195 emit_move_insn (copy_rtx (temp),
10196 gen_rtx_IOR (DImode, copy_rtx (temp),
10197 GEN_INT (ud2)));
10198 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10199 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10200 GEN_INT (16)));
10201 if (ud1 != 0)
10202 emit_move_insn (dest,
10203 gen_rtx_IOR (DImode, copy_rtx (temp),
10204 GEN_INT (ud1)));
10206 else
10208 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10210 emit_move_insn (copy_rtx (temp),
10211 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
10212 if (ud3 != 0)
10213 emit_move_insn (copy_rtx (temp),
10214 gen_rtx_IOR (DImode, copy_rtx (temp),
10215 GEN_INT (ud3)));
10217 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
10218 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10219 GEN_INT (32)));
10220 if (ud2 != 0)
10221 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10222 gen_rtx_IOR (DImode, copy_rtx (temp),
10223 GEN_INT (ud2 << 16)));
10224 if (ud1 != 0)
10225 emit_move_insn (dest,
10226 gen_rtx_IOR (DImode, copy_rtx (temp),
10227 GEN_INT (ud1)));
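/* Example (illustrative): the general case above builds
   c = 0x123456789abcdef0 with

       lis  9,0x1234     # ud4
       ori  9,9,0x5678   # ud3
       sldi 9,9,32
       oris 9,9,0x9abc   # ud2
       ori  9,9,0xdef0   # ud1  */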
10231 /* Helper for the following function. Get rid of [r+r] memory refs
10232 in cases where they won't work (TImode, TFmode, TDmode, PTImode). */
10234 static void
10235 rs6000_eliminate_indexed_memrefs (rtx operands[2])
10237 if (GET_CODE (operands[0]) == MEM
10238 && GET_CODE (XEXP (operands[0], 0)) != REG
10239 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
10240 GET_MODE (operands[0]), false))
10241 operands[0]
10242 = replace_equiv_address (operands[0],
10243 copy_addr_to_reg (XEXP (operands[0], 0)));
10245 if (GET_CODE (operands[1]) == MEM
10246 && GET_CODE (XEXP (operands[1], 0)) != REG
10247 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
10248 GET_MODE (operands[1]), false))
10249 operands[1]
10250 = replace_equiv_address (operands[1],
10251 copy_addr_to_reg (XEXP (operands[1], 0)));
10254 /* Generate a vector of constants to permute MODE for a little-endian
10255 storage operation by swapping the two halves of a vector. */
10256 static rtvec
10257 rs6000_const_vec (machine_mode mode)
10259 int i, subparts;
10260 rtvec v;
10262 switch (mode)
10264 case E_V1TImode:
10265 subparts = 1;
10266 break;
10267 case E_V2DFmode:
10268 case E_V2DImode:
10269 subparts = 2;
10270 break;
10271 case E_V4SFmode:
10272 case E_V4SImode:
10273 subparts = 4;
10274 break;
10275 case E_V8HImode:
10276 subparts = 8;
10277 break;
10278 case E_V16QImode:
10279 subparts = 16;
10280 break;
10281 default:
10282 gcc_unreachable();
10285 v = rtvec_alloc (subparts);
10287 for (i = 0; i < subparts / 2; ++i)
10288 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10289 for (i = subparts / 2; i < subparts; ++i)
10290 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
10292 return v;
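/* For instance, V4SImode yields the selector {2, 3, 0, 1}: a permute
   that swaps the two 64-bit halves of the vector.  */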
10295 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
10296 store operation. */
10297 void
10298 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
10300 /* Scalar permutations are easier to express in integer modes than in
10301 floating-point modes, so cast them here. We use V1TImode instead
10302 of TImode to ensure that the values don't go through GPRs. */
10303 if (FLOAT128_VECTOR_P (mode))
10305 dest = gen_lowpart (V1TImode, dest);
10306 source = gen_lowpart (V1TImode, source);
10307 mode = V1TImode;
10310 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
10311 scalar. */
10312 if (mode == TImode || mode == V1TImode)
10313 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
10314 GEN_INT (64))));
10315 else
10317 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10318 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
10322 /* Emit a little-endian load from vector memory location SOURCE to VSX
10323 register DEST in mode MODE. The load is done with two permuting
10324 insns that represent an lxvd2x and an xxpermdi. */
10325 void
10326 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10328 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10329 V1TImode). */
10330 if (mode == TImode || mode == V1TImode)
10332 mode = V2DImode;
10333 dest = gen_lowpart (V2DImode, dest);
10334 source = adjust_address (source, V2DImode, 0);
10337 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10338 rs6000_emit_le_vsx_permute (tmp, source, mode);
10339 rs6000_emit_le_vsx_permute (dest, tmp, mode);
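/* Sketch of the intent (illustrative): on little-endian, lxvd2x loads
   the two doublewords swapped with respect to array order; the second
   permute (the xxpermdi) swaps them back, so DEST ends up with its
   elements in array order.  */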
10342 /* Emit a little-endian store to vector memory location DEST from VSX
10343 register SOURCE in mode MODE. The store is done with two permuting
10344 insns that represent an xxpermdi and an stxvd2x. */
10345 void
10346 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10348 /* This should never be called during or after LRA, because it does
10349 not re-permute the source register. It is intended only for use
10350 during expand. */
10351 gcc_assert (!lra_in_progress && !reload_completed);
10353 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10354 V1TImode). */
10355 if (mode == TImode || mode == V1TImode)
10357 mode = V2DImode;
10358 dest = adjust_address (dest, V2DImode, 0);
10359 source = gen_lowpart (V2DImode, source);
10362 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10363 rs6000_emit_le_vsx_permute (tmp, source, mode);
10364 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10367 /* Emit a sequence representing a little-endian VSX load or store,
10368 moving data from SOURCE to DEST in mode MODE. This is done
10369 separately from rs6000_emit_move to ensure it is called only
10370 during expand. LE VSX loads and stores introduced later are
10371 handled with a split. The expand-time RTL generation allows
10372 us to optimize away redundant pairs of register-permutes. */
10373 void
10374 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10376 gcc_assert (!BYTES_BIG_ENDIAN
10377 && VECTOR_MEM_VSX_P (mode)
10378 && !TARGET_P9_VECTOR
10379 && !gpr_or_gpr_p (dest, source)
10380 && (MEM_P (source) ^ MEM_P (dest)));
10382 if (MEM_P (source))
10384 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10385 rs6000_emit_le_vsx_load (dest, source, mode);
10387 else
10389 if (!REG_P (source))
10390 source = force_reg (mode, source);
10391 rs6000_emit_le_vsx_store (dest, source, mode);
10395 /* Return whether a SFmode or SImode move can be done without converting one
10396 mode to another. This arises when we have:
10398 (SUBREG:SF (REG:SI ...))
10399 (SUBREG:SI (REG:SF ...))
10401 and one of the values is in a floating point/vector register, where SFmode
10402 scalars are stored in DFmode format. */
10404 bool
10405 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10407 if (TARGET_ALLOW_SF_SUBREG)
10408 return true;
10410 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10411 return true;
10413 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10414 return true;
10416 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
10417 if (SUBREG_P (dest))
10419 rtx dest_subreg = SUBREG_REG (dest);
10420 rtx src_subreg = SUBREG_REG (src);
10421 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10424 return false;
10428 /* Helper function to change moves with:
10430 (SUBREG:SF (REG:SI)) and
10431 (SUBREG:SI (REG:SF))
10433 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10434 values are stored as DFmode values in the VSX registers. We need to convert
10435 the bits before we can use a direct move or operate on the bits in the
10436 vector register as an integer type.
10438 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
10440 static bool
10441 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10443 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
10444 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10445 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10447 rtx inner_source = SUBREG_REG (source);
10448 machine_mode inner_mode = GET_MODE (inner_source);
10450 if (mode == SImode && inner_mode == SFmode)
10452 emit_insn (gen_movsi_from_sf (dest, inner_source));
10453 return true;
10456 if (mode == SFmode && inner_mode == SImode)
10458 emit_insn (gen_movsf_from_si (dest, inner_source));
10459 return true;
10463 return false;
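/* Example (illustrative): (set (reg:SI 3) (subreg:SI (reg:SF 33) 0))
   is rewritten to a movsi_from_sf insn, which converts the DFmode
   image of the SFmode value held in the VSX register back to SFmode
   bits before direct-moving them to the GPR.  */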
10466 /* Emit a move from SOURCE to DEST in mode MODE. */
10467 void
10468 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10470 rtx operands[2];
10471 operands[0] = dest;
10472 operands[1] = source;
10474 if (TARGET_DEBUG_ADDR)
10476 fprintf (stderr,
10477 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
10478 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10479 GET_MODE_NAME (mode),
10480 lra_in_progress,
10481 reload_completed,
10482 can_create_pseudo_p ());
10483 debug_rtx (dest);
10484 fprintf (stderr, "source:\n");
10485 debug_rtx (source);
10488 /* Sanity check: we should not get a CONST_WIDE_INT for a mode that
fits in a HOST_WIDE_INT. */
10489 if (CONST_WIDE_INT_P (operands[1])
10490 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10492 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10493 gcc_unreachable ();
10496 /* See if we need to special case SImode/SFmode SUBREG moves. */
10497 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10498 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10499 return;
10501 /* Check if GCC is setting up a block move that will end up using FP
10502 registers as temporaries. We must make sure this is acceptable. */
10503 if (GET_CODE (operands[0]) == MEM
10504 && GET_CODE (operands[1]) == MEM
10505 && mode == DImode
10506 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
10507 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
10508 && ! (rs6000_slow_unaligned_access (SImode,
10509 (MEM_ALIGN (operands[0]) > 32
10510 ? 32 : MEM_ALIGN (operands[0])))
10511 || rs6000_slow_unaligned_access (SImode,
10512 (MEM_ALIGN (operands[1]) > 32
10513 ? 32 : MEM_ALIGN (operands[1]))))
10514 && ! MEM_VOLATILE_P (operands [0])
10515 && ! MEM_VOLATILE_P (operands [1]))
10517 emit_move_insn (adjust_address (operands[0], SImode, 0),
10518 adjust_address (operands[1], SImode, 0));
10519 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10520 adjust_address (copy_rtx (operands[1]), SImode, 4));
10521 return;
10524 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10525 && !gpc_reg_operand (operands[1], mode))
10526 operands[1] = force_reg (mode, operands[1]);
10528 /* Recognize the case where operand[1] is a reference to thread-local
10529 data and load its address to a register. */
10530 if (tls_referenced_p (operands[1]))
10532 enum tls_model model;
10533 rtx tmp = operands[1];
10534 rtx addend = NULL;
10536 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10538 addend = XEXP (XEXP (tmp, 0), 1);
10539 tmp = XEXP (XEXP (tmp, 0), 0);
10542 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10543 model = SYMBOL_REF_TLS_MODEL (tmp);
10544 gcc_assert (model != 0);
10546 tmp = rs6000_legitimize_tls_address (tmp, model);
10547 if (addend)
10549 tmp = gen_rtx_PLUS (mode, tmp, addend);
10550 tmp = force_operand (tmp, operands[0]);
10552 operands[1] = tmp;
10555 /* 128-bit constant floating-point values on Darwin should really be loaded
10556 as two parts. However, this premature splitting is a problem when DFmode
10557 values can go into Altivec registers. */
10558 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10559 && GET_CODE (operands[1]) == CONST_DOUBLE)
10561 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10562 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10563 DFmode);
10564 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10565 GET_MODE_SIZE (DFmode)),
10566 simplify_gen_subreg (DFmode, operands[1], mode,
10567 GET_MODE_SIZE (DFmode)),
10568 DFmode);
10569 return;
10572 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10573 p1:SD) if p1 is not of floating point class and p0 is spilled,
10574 since we have no analogous movsd_store for this case. */
10575 if (lra_in_progress && mode == DDmode
10576 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10577 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10578 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10579 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10581 enum reg_class cl;
10582 int regno = REGNO (SUBREG_REG (operands[1]));
10584 if (regno >= FIRST_PSEUDO_REGISTER)
10586 cl = reg_preferred_class (regno);
10587 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10589 if (regno >= 0 && ! FP_REGNO_P (regno))
10591 mode = SDmode;
10592 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10593 operands[1] = SUBREG_REG (operands[1]);
10596 if (lra_in_progress
10597 && mode == SDmode
10598 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10599 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10600 && (REG_P (operands[1])
10601 || (GET_CODE (operands[1]) == SUBREG
10602 && REG_P (SUBREG_REG (operands[1])))))
10604 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10605 ? SUBREG_REG (operands[1]) : operands[1]);
10606 enum reg_class cl;
10608 if (regno >= FIRST_PSEUDO_REGISTER)
10610 cl = reg_preferred_class (regno);
10611 gcc_assert (cl != NO_REGS);
10612 regno = ira_class_hard_regs[cl][0];
10614 if (FP_REGNO_P (regno))
10616 if (GET_MODE (operands[0]) != DDmode)
10617 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10618 emit_insn (gen_movsd_store (operands[0], operands[1]));
10620 else if (INT_REGNO_P (regno))
10621 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10622 else
10623 gcc_unreachable();
10624 return;
10626 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10627 p1:DD)) if p0 is not of floating point class and p1 is spilled,
10628 since we have no analogous movsd_load for this case. */
10629 if (lra_in_progress && mode == DDmode
10630 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10631 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10632 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10633 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10635 enum reg_class cl;
10636 int regno = REGNO (SUBREG_REG (operands[0]));
10638 if (regno >= FIRST_PSEUDO_REGISTER)
10640 cl = reg_preferred_class (regno);
10641 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10643 if (regno >= 0 && ! FP_REGNO_P (regno))
10645 mode = SDmode;
10646 operands[0] = SUBREG_REG (operands[0]);
10647 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10650 if (lra_in_progress
10651 && mode == SDmode
10652 && (REG_P (operands[0])
10653 || (GET_CODE (operands[0]) == SUBREG
10654 && REG_P (SUBREG_REG (operands[0]))))
10655 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10656 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10658 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10659 ? SUBREG_REG (operands[0]) : operands[0]);
10660 enum reg_class cl;
10662 if (regno >= FIRST_PSEUDO_REGISTER)
10664 cl = reg_preferred_class (regno);
10665 gcc_assert (cl != NO_REGS);
10666 regno = ira_class_hard_regs[cl][0];
10668 if (FP_REGNO_P (regno))
10670 if (GET_MODE (operands[1]) != DDmode)
10671 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10672 emit_insn (gen_movsd_load (operands[0], operands[1]));
10674 else if (INT_REGNO_P (regno))
10675 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10676 else
10677 gcc_unreachable();
10678 return;
10681 /* FIXME: In the long term, this switch statement should go away
10682 and be replaced by a sequence of tests based on things like
10683 mode == Pmode. */
10684 switch (mode)
10686 case E_HImode:
10687 case E_QImode:
10688 if (CONSTANT_P (operands[1])
10689 && GET_CODE (operands[1]) != CONST_INT)
10690 operands[1] = force_const_mem (mode, operands[1]);
10691 break;
10693 case E_TFmode:
10694 case E_TDmode:
10695 case E_IFmode:
10696 case E_KFmode:
10697 if (FLOAT128_2REG_P (mode))
10698 rs6000_eliminate_indexed_memrefs (operands);
10699 /* fall through */
10701 case E_DFmode:
10702 case E_DDmode:
10703 case E_SFmode:
10704 case E_SDmode:
10705 if (CONSTANT_P (operands[1])
10706 && ! easy_fp_constant (operands[1], mode))
10707 operands[1] = force_const_mem (mode, operands[1]);
10708 break;
10710 case E_V16QImode:
10711 case E_V8HImode:
10712 case E_V4SFmode:
10713 case E_V4SImode:
10714 case E_V2SFmode:
10715 case E_V2SImode:
10716 case E_V2DFmode:
10717 case E_V2DImode:
10718 case E_V1TImode:
10719 if (CONSTANT_P (operands[1])
10720 && !easy_vector_constant (operands[1], mode))
10721 operands[1] = force_const_mem (mode, operands[1]);
10722 break;
10724 case E_SImode:
10725 case E_DImode:
10726 /* Use the default pattern for the address of ELF small data. */
10727 if (TARGET_ELF
10728 && mode == Pmode
10729 && DEFAULT_ABI == ABI_V4
10730 && (GET_CODE (operands[1]) == SYMBOL_REF
10731 || GET_CODE (operands[1]) == CONST)
10732 && small_data_operand (operands[1], mode))
10734 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10735 return;
10738 if (DEFAULT_ABI == ABI_V4
10739 && mode == Pmode && mode == SImode
10740 && flag_pic == 1 && got_operand (operands[1], mode))
10742 emit_insn (gen_movsi_got (operands[0], operands[1]));
10743 return;
10746 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10747 && TARGET_NO_TOC
10748 && ! flag_pic
10749 && mode == Pmode
10750 && CONSTANT_P (operands[1])
10751 && GET_CODE (operands[1]) != HIGH
10752 && GET_CODE (operands[1]) != CONST_INT)
10754 rtx target = (!can_create_pseudo_p ()
10755 ? operands[0]
10756 : gen_reg_rtx (mode));
10758 /* If this is a function address on -mcall-aixdesc,
10759 convert it to the address of the descriptor. */
10760 if (DEFAULT_ABI == ABI_AIX
10761 && GET_CODE (operands[1]) == SYMBOL_REF
10762 && XSTR (operands[1], 0)[0] == '.')
10764 const char *name = XSTR (operands[1], 0);
10765 rtx new_ref;
10766 while (*name == '.')
10767 name++;
10768 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10769 CONSTANT_POOL_ADDRESS_P (new_ref)
10770 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10771 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10772 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10773 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10774 operands[1] = new_ref;
10777 if (DEFAULT_ABI == ABI_DARWIN)
10779 #if TARGET_MACHO
10780 if (MACHO_DYNAMIC_NO_PIC_P)
10782 /* Take care of any required data indirection. */
10783 operands[1] = rs6000_machopic_legitimize_pic_address (
10784 operands[1], mode, operands[0]);
10785 if (operands[0] != operands[1])
10786 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10787 return;
10789 #endif
10790 emit_insn (gen_macho_high (target, operands[1]));
10791 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10792 return;
10795 emit_insn (gen_elf_high (target, operands[1]));
10796 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10797 return;
10800 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10801 and we have put it in the TOC, we just need to make a TOC-relative
10802 reference to it. */
10803 if (TARGET_TOC
10804 && GET_CODE (operands[1]) == SYMBOL_REF
10805 && use_toc_relative_ref (operands[1], mode))
10806 operands[1] = create_TOC_reference (operands[1], operands[0]);
10807 else if (mode == Pmode
10808 && CONSTANT_P (operands[1])
10809 && GET_CODE (operands[1]) != HIGH
10810 && ((GET_CODE (operands[1]) != CONST_INT
10811 && ! easy_fp_constant (operands[1], mode))
10812 || (GET_CODE (operands[1]) == CONST_INT
10813 && (num_insns_constant (operands[1], mode)
10814 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10815 || (GET_CODE (operands[0]) == REG
10816 && FP_REGNO_P (REGNO (operands[0]))))
10817 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10818 && (TARGET_CMODEL == CMODEL_SMALL
10819 || can_create_pseudo_p ()
10820 || (REG_P (operands[0])
10821 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10824 #if TARGET_MACHO
10825 /* Darwin uses a special PIC legitimizer. */
10826 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10828 operands[1] =
10829 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10830 operands[0]);
10831 if (operands[0] != operands[1])
10832 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10833 return;
10835 #endif
10837 /* If we are to limit the number of things we put in the TOC and
10838 this is a symbol plus a constant we can add in one insn,
10839 just put the symbol in the TOC and add the constant. */
10840 if (GET_CODE (operands[1]) == CONST
10841 && TARGET_NO_SUM_IN_TOC
10842 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10843 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10844 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10845 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10846 && ! side_effects_p (operands[0]))
10848 rtx sym =
10849 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10850 rtx other = XEXP (XEXP (operands[1], 0), 1);
10852 sym = force_reg (mode, sym);
10853 emit_insn (gen_add3_insn (operands[0], sym, other));
10854 return;
10857 operands[1] = force_const_mem (mode, operands[1]);
10859 if (TARGET_TOC
10860 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10861 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10863 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10864 operands[0]);
10865 operands[1] = gen_const_mem (mode, tocref);
10866 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10869 break;
10871 case E_TImode:
10872 if (!VECTOR_MEM_VSX_P (TImode))
10873 rs6000_eliminate_indexed_memrefs (operands);
10874 break;
10876 case E_PTImode:
10877 rs6000_eliminate_indexed_memrefs (operands);
10878 break;
10880 default:
10881 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10884 /* Above, we may have called force_const_mem which may have returned
10885 an invalid address. If we can, fix this up; otherwise, reload will
10886 have to deal with it. */
10887 if (GET_CODE (operands[1]) == MEM)
10888 operands[1] = validize_mem (operands[1]);
10890 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10893 /* Nonzero if we can use a floating-point register to pass this arg. */
10894 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10895 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10896 && (CUM)->fregno <= FP_ARG_MAX_REG \
10897 && TARGET_HARD_FLOAT)
10899 /* Nonzero if we can use an AltiVec register to pass this arg. */
10900 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10901 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10902 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10903 && TARGET_ALTIVEC_ABI \
10904 && (NAMED))
10906 /* Walk down the type tree of TYPE counting consecutive base elements.
10907 If *MODEP is VOIDmode, then set it to the first valid floating point
10908 or vector type. If a non-floating point or vector type is found, or
10909 if a floating point or vector type that doesn't match a non-VOIDmode
10910 *MODEP is found, then return -1, otherwise return the count in the
10911 sub-tree. */
10913 static int
10914 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10916 machine_mode mode;
10917 HOST_WIDE_INT size;
10919 switch (TREE_CODE (type))
10921 case REAL_TYPE:
10922 mode = TYPE_MODE (type);
10923 if (!SCALAR_FLOAT_MODE_P (mode))
10924 return -1;
10926 if (*modep == VOIDmode)
10927 *modep = mode;
10929 if (*modep == mode)
10930 return 1;
10932 break;
10934 case COMPLEX_TYPE:
10935 mode = TYPE_MODE (TREE_TYPE (type));
10936 if (!SCALAR_FLOAT_MODE_P (mode))
10937 return -1;
10939 if (*modep == VOIDmode)
10940 *modep = mode;
10942 if (*modep == mode)
10943 return 2;
10945 break;
10947 case VECTOR_TYPE:
10948 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10949 return -1;
10951 /* Use V4SImode as representative of all 128-bit vector types. */
10952 size = int_size_in_bytes (type);
10953 switch (size)
10955 case 16:
10956 mode = V4SImode;
10957 break;
10958 default:
10959 return -1;
10962 if (*modep == VOIDmode)
10963 *modep = mode;
10965 /* Vector modes are considered to be opaque: two vectors are
10966 equivalent for the purposes of being homogeneous aggregates
10967 if they are the same size. */
10968 if (*modep == mode)
10969 return 1;
10971 break;
10973 case ARRAY_TYPE:
10975 int count;
10976 tree index = TYPE_DOMAIN (type);
10978 /* Can't handle incomplete types nor sizes that are not
10979 fixed. */
10980 if (!COMPLETE_TYPE_P (type)
10981 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10982 return -1;
10984 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10985 if (count == -1
10986 || !index
10987 || !TYPE_MAX_VALUE (index)
10988 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10989 || !TYPE_MIN_VALUE (index)
10990 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10991 || count < 0)
10992 return -1;
10994 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10995 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10997 /* There must be no padding. */
10998 if (wi::to_wide (TYPE_SIZE (type))
10999 != count * GET_MODE_BITSIZE (*modep))
11000 return -1;
11002 return count;
11005 case RECORD_TYPE:
11007 int count = 0;
11008 int sub_count;
11009 tree field;
11011 /* Can't handle incomplete types nor sizes that are not
11012 fixed. */
11013 if (!COMPLETE_TYPE_P (type)
11014 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11015 return -1;
11017 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11019 if (TREE_CODE (field) != FIELD_DECL)
11020 continue;
11022 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11023 if (sub_count < 0)
11024 return -1;
11025 count += sub_count;
11028 /* There must be no padding. */
11029 if (wi::to_wide (TYPE_SIZE (type))
11030 != count * GET_MODE_BITSIZE (*modep))
11031 return -1;
11033 return count;
11036 case UNION_TYPE:
11037 case QUAL_UNION_TYPE:
11039 /* These aren't very interesting except in a degenerate case. */
11040 int count = 0;
11041 int sub_count;
11042 tree field;
11044 /* Can't handle incomplete types nor sizes that are not
11045 fixed. */
11046 if (!COMPLETE_TYPE_P (type)
11047 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11048 return -1;
11050 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11052 if (TREE_CODE (field) != FIELD_DECL)
11053 continue;
11055 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11056 if (sub_count < 0)
11057 return -1;
11058 count = count > sub_count ? count : sub_count;
11061 /* There must be no padding. */
11062 if (wi::to_wide (TYPE_SIZE (type))
11063 != count * GET_MODE_BITSIZE (*modep))
11064 return -1;
11066 return count;
11069 default:
11070 break;
11073 return -1;
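/* For instance, given the hypothetical types

     struct pt  { double x, y; };
     struct box { struct pt lo, hi; };

   walking `struct box' finds four REAL_TYPE leaves, latches *MODEP to
   DFmode at the first one, and returns 4 (256 bits == 4 * 64, so the
   padding checks pass).  A `struct { double d; int i; }' instead hits
   the default case on the int field and yields -1.  */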
11076 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
11077 float or vector aggregate that shall be passed in FP/vector registers
11078 according to the ELFv2 ABI, return the homogeneous element mode in
11079 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
11081 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
11083 static bool
11084 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
11085 machine_mode *elt_mode,
11086 int *n_elts)
11088 /* Note that we do not accept complex types at the top level as
11089 homogeneous aggregates; these types are handled via the
11090 targetm.calls.split_complex_arg mechanism. Complex types
11091 can be elements of homogeneous aggregates, however. */
11092 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
11094 machine_mode field_mode = VOIDmode;
11095 int field_count = rs6000_aggregate_candidate (type, &field_mode);
11097 if (field_count > 0)
11099 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
11100 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
11102 /* The ELFv2 ABI allows homogeneous aggregates to occupy
11103 up to AGGR_ARG_NUM_REG registers. */
11104 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
11106 if (elt_mode)
11107 *elt_mode = field_mode;
11108 if (n_elts)
11109 *n_elts = field_count;
11110 return true;
11115 if (elt_mode)
11116 *elt_mode = mode;
11117 if (n_elts)
11118 *n_elts = 1;
11119 return false;
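/* Concretely: under ELFv2 a hypothetical `struct { double a, b, c; }'
   is discovered as a homogeneous aggregate with *ELT_MODE == DFmode
   and *N_ELTS == 3, since each DFmode element needs one register and
   3 <= AGGR_ARG_NUM_REG (eight under ELFv2).  Nine doubles would
   exceed that limit and the function would take the FALSE path
   instead.  */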
11122 /* Return a nonzero value to say to return the function value in
11123 memory, just as large structures are always returned. TYPE will be
11124 the data type of the value, and FNTYPE will be the type of the
11125 function doing the returning, or @code{NULL} for libcalls.
11127 The AIX ABI for the RS/6000 specifies that all structures are
11128 returned in memory. The Darwin ABI does the same.
11130 For the Darwin 64 Bit ABI, a function result can be returned in
11131 registers or in memory, depending on the size of the return data
11132 type. If it is returned in registers, the value occupies the same
11133 registers as it would if it were the first and only function
11134 argument. Otherwise, the function places its result in memory at
11135 the location pointed to by GPR3.
11137 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
11138 but a draft put them in memory, and GCC used to implement the draft
11139 instead of the final standard. Therefore, aix_struct_return
11140 controls this instead of DEFAULT_ABI; V.4 targets needing backward
11141 compatibility can change DRAFT_V4_STRUCT_RET to override the
11142 default, and -m switches get the final word. See
11143 rs6000_option_override_internal for more details.
11145 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
11146 long double support is enabled. These values are returned in memory.
11148 int_size_in_bytes returns -1 for variable size objects, which go in
11149 memory always. The cast to unsigned makes -1 > 8. */
11151 static bool
11152 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
11154 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
11155 if (TARGET_MACHO
11156 && rs6000_darwin64_abi
11157 && TREE_CODE (type) == RECORD_TYPE
11158 && int_size_in_bytes (type) > 0)
11160 CUMULATIVE_ARGS valcum;
11161 rtx valret;
11163 valcum.words = 0;
11164 valcum.fregno = FP_ARG_MIN_REG;
11165 valcum.vregno = ALTIVEC_ARG_MIN_REG;
11166 /* Do a trial code generation as if this were going to be passed
11167 as an argument; if any part goes in memory, we return NULL. */
11168 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
11169 if (valret)
11170 return false;
11171 /* Otherwise fall through to more conventional ABI rules. */
11174 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
11175 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
11176 NULL, NULL))
11177 return false;
11179 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
11180 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
11181 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
11182 return false;
11184 if (AGGREGATE_TYPE_P (type)
11185 && (aix_struct_return
11186 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
11187 return true;
11189 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11190 modes only exist for GCC vector types if -maltivec. */
11191 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
11192 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11193 return false;
11195 /* Return synthetic vectors in memory. */
11196 if (TREE_CODE (type) == VECTOR_TYPE
11197 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11199 static bool warned_for_return_big_vectors = false;
11200 if (!warned_for_return_big_vectors)
11202 warning (OPT_Wpsabi, "GCC vector returned by reference: "
11203 "non-standard ABI extension with no compatibility "
11204 "guarantee");
11205 warned_for_return_big_vectors = true;
11207 return true;
11210 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11211 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11212 return true;
11214 return false;
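/* Some examples of the net effect: under ELFv2 a hypothetical
   `struct { float f[4]; }' is a homogeneous aggregate and comes back
   in f1..f4, and a plain `struct { char c[12]; }' fits the 16-byte
   rule and comes back in GPRs, whereas under AIX both are returned in
   memory because aix_struct_return is set.  A variable-sized type
   always goes to memory: int_size_in_bytes yields -1 and the unsigned
   cast makes that compare greater than 8.  */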
11217 /* Specify whether values returned in registers should be at the most
11218 significant end of a register. We want aggregates returned by
11219 value to match the way aggregates are passed to functions. */
11221 static bool
11222 rs6000_return_in_msb (const_tree valtype)
11224 return (DEFAULT_ABI == ABI_ELFv2
11225 && BYTES_BIG_ENDIAN
11226 && AGGREGATE_TYPE_P (valtype)
11227 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
11228 == PAD_UPWARD));
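/* E.g. a 3-byte aggregate returned under big-endian ELFv2 pads upward
   (see rs6000_function_arg_padding below) and is therefore placed at
   the most significant end of r3.  */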
11231 #ifdef HAVE_AS_GNU_ATTRIBUTE
11232 /* Return TRUE if a call to function FNDECL may be one that
11233 potentially affects the function calling ABI of the object file. */
11235 static bool
11236 call_ABI_of_interest (tree fndecl)
11238 if (rs6000_gnu_attr && symtab->state == EXPANSION)
11240 struct cgraph_node *c_node;
11242 /* Libcalls are always interesting. */
11243 if (fndecl == NULL_TREE)
11244 return true;
11246 /* Any call to an external function is interesting. */
11247 if (DECL_EXTERNAL (fndecl))
11248 return true;
11250 /* Interesting functions that we are emitting in this object file. */
11251 c_node = cgraph_node::get (fndecl);
11252 c_node = c_node->ultimate_alias_target ();
11253 return !c_node->only_called_directly_p ();
11255 return false;
11257 #endif
11259 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11260 for a call to a function whose data type is FNTYPE.
11261 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
11263 For incoming args we set the number of arguments in the prototype large
11264 so we never return a PARALLEL. */
11266 void
11267 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
11268 rtx libname ATTRIBUTE_UNUSED, int incoming,
11269 int libcall, int n_named_args,
11270 tree fndecl ATTRIBUTE_UNUSED,
11271 machine_mode return_mode ATTRIBUTE_UNUSED)
11273 static CUMULATIVE_ARGS zero_cumulative;
11275 *cum = zero_cumulative;
11276 cum->words = 0;
11277 cum->fregno = FP_ARG_MIN_REG;
11278 cum->vregno = ALTIVEC_ARG_MIN_REG;
11279 cum->prototype = (fntype && prototype_p (fntype));
11280 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
11281 ? CALL_LIBCALL : CALL_NORMAL);
11282 cum->sysv_gregno = GP_ARG_MIN_REG;
11283 cum->stdarg = stdarg_p (fntype);
11284 cum->libcall = libcall;
11286 cum->nargs_prototype = 0;
11287 if (incoming || cum->prototype)
11288 cum->nargs_prototype = n_named_args;
11290 /* Check for a longcall attribute. */
11291 if ((!fntype && rs6000_default_long_calls)
11292 || (fntype
11293 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
11294 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
11295 cum->call_cookie |= CALL_LONG;
11297 if (TARGET_DEBUG_ARG)
11299 fprintf (stderr, "\ninit_cumulative_args:");
11300 if (fntype)
11302 tree ret_type = TREE_TYPE (fntype);
11303 fprintf (stderr, " ret code = %s,",
11304 get_tree_code_name (TREE_CODE (ret_type)));
11307 if (cum->call_cookie & CALL_LONG)
11308 fprintf (stderr, " longcall,");
11310 fprintf (stderr, " proto = %d, nargs = %d\n",
11311 cum->prototype, cum->nargs_prototype);
11314 #ifdef HAVE_AS_GNU_ATTRIBUTE
11315 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
11317 cum->escapes = call_ABI_of_interest (fndecl);
11318 if (cum->escapes)
11320 tree return_type;
11322 if (fntype)
11324 return_type = TREE_TYPE (fntype);
11325 return_mode = TYPE_MODE (return_type);
11327 else
11328 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
11330 if (return_type != NULL)
11332 if (TREE_CODE (return_type) == RECORD_TYPE
11333 && TYPE_TRANSPARENT_AGGR (return_type))
11335 return_type = TREE_TYPE (first_field (return_type));
11336 return_mode = TYPE_MODE (return_type);
11338 if (AGGREGATE_TYPE_P (return_type)
11339 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
11340 <= 8))
11341 rs6000_returns_struct = true;
11343 if (SCALAR_FLOAT_MODE_P (return_mode))
11345 rs6000_passes_float = true;
11346 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11347 && (FLOAT128_IBM_P (return_mode)
11348 || FLOAT128_IEEE_P (return_mode)
11349 || (return_type != NULL
11350 && (TYPE_MAIN_VARIANT (return_type)
11351 == long_double_type_node))))
11352 rs6000_passes_long_double = true;
11354 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
11355 || PAIRED_VECTOR_MODE (return_mode))
11356 rs6000_passes_vector = true;
11359 #endif
11361 if (fntype
11362 && !TARGET_ALTIVEC
11363 && TARGET_ALTIVEC_ABI
11364 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
11366 error ("cannot return value in vector register because"
11367 " altivec instructions are disabled, use %qs"
11368 " to enable them", "-maltivec");
11372 /* The mode the ABI uses for a word. This is not the same as word_mode
11373 for -m32 -mpowerpc64. This is used to implement various target hooks. */
11375 static scalar_int_mode
11376 rs6000_abi_word_mode (void)
11378 return TARGET_32BIT ? SImode : DImode;
11381 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
11382 static char *
11383 rs6000_offload_options (void)
11385 if (TARGET_64BIT)
11386 return xstrdup ("-foffload-abi=lp64");
11387 else
11388 return xstrdup ("-foffload-abi=ilp32");
11391 /* On rs6000, function arguments are promoted, as are function return
11392 values. */
11394 static machine_mode
11395 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
11396 machine_mode mode,
11397 int *punsignedp ATTRIBUTE_UNUSED,
11398 const_tree, int)
11400 PROMOTE_MODE (mode, *punsignedp, type);
11402 return mode;
11405 /* Return true if TYPE must be passed on the stack and not in registers. */
11407 static bool
11408 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
11410 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
11411 return must_pass_in_stack_var_size (mode, type);
11412 else
11413 return must_pass_in_stack_var_size_or_pad (mode, type);
11416 static inline bool
11417 is_complex_IBM_long_double (machine_mode mode)
11419 return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
11422 /* Whether ABI_V4 passes MODE args to a function in floating point
11423 registers. */
11425 static bool
11426 abi_v4_pass_in_fpr (machine_mode mode)
11428 if (!TARGET_HARD_FLOAT)
11429 return false;
11430 if (TARGET_SINGLE_FLOAT && mode == SFmode)
11431 return true;
11432 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
11433 return true;
11434 /* ABI_V4 passes complex IBM long double in 8 gprs.
11435 Stupid, but we can't change the ABI now. */
11436 if (is_complex_IBM_long_double (mode))
11437 return false;
11438 if (FLOAT128_2REG_P (mode))
11439 return true;
11440 if (DECIMAL_FLOAT_MODE_P (mode))
11441 return true;
11442 return false;
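/* E.g. under the 32-bit SVR4 (ABI_V4) rules with hard float, a double
   lands in an FPR, as does IBM extended long double (FLOAT128_2REG_P,
   two FPRs), while complex IBM long double --
   is_complex_IBM_long_double -- deliberately falls back to the
   eight-GPR scheme noted above.  */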
11445 /* Implement TARGET_FUNCTION_ARG_PADDING.
11447 For the AIX ABI structs are always stored left shifted in their
11448 argument slot. */
11450 static pad_direction
11451 rs6000_function_arg_padding (machine_mode mode, const_tree type)
11453 #ifndef AGGREGATE_PADDING_FIXED
11454 #define AGGREGATE_PADDING_FIXED 0
11455 #endif
11456 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11457 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11458 #endif
11460 if (!AGGREGATE_PADDING_FIXED)
11462 /* GCC used to pass structures of the same size as integer types as
11463 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
11464 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11465 passed padded downward, except that -mstrict-align further
11466 muddied the water in that multi-component structures of 2 and 4
11467 bytes in size were passed padded upward.
11469 The following arranges for best compatibility with previous
11470 versions of gcc, but removes the -mstrict-align dependency. */
11471 if (BYTES_BIG_ENDIAN)
11473 HOST_WIDE_INT size = 0;
11475 if (mode == BLKmode)
11477 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11478 size = int_size_in_bytes (type);
11480 else
11481 size = GET_MODE_SIZE (mode);
11483 if (size == 1 || size == 2 || size == 4)
11484 return PAD_DOWNWARD;
11486 return PAD_UPWARD;
11489 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11491 if (type != 0 && AGGREGATE_TYPE_P (type))
11492 return PAD_UPWARD;
11495 /* Fall back to the default. */
11496 return default_function_arg_padding (mode, type);
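/* The upshot on a big-endian target: a 2-byte struct is padded
   downward (right-justified in its slot, the way a short would be),
   while a 3-byte struct is padded upward (left-justified), preserving
   the historic behaviour described above.  */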
11499 /* If defined, a C expression that gives the alignment boundary, in bits,
11500 of an argument with the specified mode and type. If it is not defined,
11501 PARM_BOUNDARY is used for all arguments.
11503 V.4 wants long longs and doubles to be double word aligned. Just
11504 testing the mode size is a boneheaded way to do this as it means
11505 that other types such as complex int are also double word aligned.
11506 However, we're stuck with this because changing the ABI might break
11507 existing library interfaces.
11509 Quadword align Altivec/VSX vectors.
11510 Quadword align large synthetic vector types. */
11512 static unsigned int
11513 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11515 machine_mode elt_mode;
11516 int n_elts;
11518 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11520 if (DEFAULT_ABI == ABI_V4
11521 && (GET_MODE_SIZE (mode) == 8
11522 || (TARGET_HARD_FLOAT
11523 && !is_complex_IBM_long_double (mode)
11524 && FLOAT128_2REG_P (mode))))
11525 return 64;
11526 else if (FLOAT128_VECTOR_P (mode))
11527 return 128;
11528 else if (PAIRED_VECTOR_MODE (mode)
11529 || (type && TREE_CODE (type) == VECTOR_TYPE
11530 && int_size_in_bytes (type) >= 8
11531 && int_size_in_bytes (type) < 16))
11532 return 64;
11533 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11534 || (type && TREE_CODE (type) == VECTOR_TYPE
11535 && int_size_in_bytes (type) >= 16))
11536 return 128;
11538 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11539 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11540 -mcompat-align-parm is used. */
11541 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11542 || DEFAULT_ABI == ABI_ELFv2)
11543 && type && TYPE_ALIGN (type) > 64)
11545 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11546 or homogeneous float/vector aggregates here. We already handled
11547 vector aggregates above, but still need to check for float here. */
11548 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11549 && !SCALAR_FLOAT_MODE_P (elt_mode));
11551 /* We used to check for BLKmode instead of the above aggregate type
11552 check. Warn when this results in any difference to the ABI. */
11553 if (aggregate_p != (mode == BLKmode))
11555 static bool warned;
11556 if (!warned && warn_psabi)
11558 warned = true;
11559 inform (input_location,
11560 "the ABI of passing aggregates with %d-byte alignment"
11561 " has changed in GCC 5",
11562 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11566 if (aggregate_p)
11567 return 128;
11570 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11571 implement the "aggregate type" check as a BLKmode check here; this
11572 means certain aggregate types are in fact not aligned. */
11573 if (TARGET_MACHO && rs6000_darwin64_abi
11574 && mode == BLKmode
11575 && type && TYPE_ALIGN (type) > 64)
11576 return 128;
11578 return PARM_BOUNDARY;
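/* A few data points, assuming a 64-bit ELFv2 target: a V4SImode vector
   argument gets 128-bit alignment, a hypothetical
   `struct __attribute__ ((aligned (16))) { char c[32]; }' also gets
   128 bits via the aggregate check just above, and everything else
   falls back to PARM_BOUNDARY (64 bits).  Under the 32-bit V.4 ABI a
   `long long' or `double' is instead bumped to 64-bit alignment.  */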
11581 /* The offset in words to the start of the parameter save area. */
11583 static unsigned int
11584 rs6000_parm_offset (void)
11586 return (DEFAULT_ABI == ABI_V4 ? 2
11587 : DEFAULT_ABI == ABI_ELFv2 ? 4
11588 : 6);
11591 /* For a function parm of MODE and TYPE, return the starting word in
11592 the parameter area. NWORDS of the parameter area are already used. */
11594 static unsigned int
11595 rs6000_parm_start (machine_mode mode, const_tree type,
11596 unsigned int nwords)
11598 unsigned int align;
11600 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11601 return nwords + (-(rs6000_parm_offset () + nwords) & align);
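/* Worked example: on ELFv2 the save area sits rs6000_parm_offset ()
   == 4 words past the stack pointer.  For a 16-byte-aligned argument
   with PARM_BOUNDARY == 64 we get align == 1, so with NWORDS == 3
   already used the result is 3 + (-(4 + 3) & 1) == 4: one padding word
   is skipped so that word 4 + 4 is even, i.e. 16-byte aligned relative
   to the stack pointer.  */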
11604 /* Compute the size (in words) of a function argument. */
11606 static unsigned long
11607 rs6000_arg_size (machine_mode mode, const_tree type)
11609 unsigned long size;
11611 if (mode != BLKmode)
11612 size = GET_MODE_SIZE (mode);
11613 else
11614 size = int_size_in_bytes (type);
11616 if (TARGET_32BIT)
11617 return (size + 3) >> 2;
11618 else
11619 return (size + 7) >> 3;
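/* E.g. an 18-byte BLKmode struct occupies (18 + 3) >> 2 == 5 words in
   32-bit mode but (18 + 7) >> 3 == 3 doublewords in 64-bit mode; the
   rounding means a trailing partial word still claims a whole slot.  */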
11622 /* Use this to flush pending int fields. */
11624 static void
11625 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11626 HOST_WIDE_INT bitpos, int final)
11628 unsigned int startbit, endbit;
11629 int intregs, intoffset;
11631 /* Handle the situations where a float is taking up the first half
11632 of the GPR, and the other half is empty (typically due to
11633 alignment restrictions). We can detect this by an 8-byte-aligned
11634 int field, or by seeing that this is the final flush for this
11635 argument. Count the word and continue on. */
11636 if (cum->floats_in_gpr == 1
11637 && (cum->intoffset % 64 == 0
11638 || (cum->intoffset == -1 && final)))
11640 cum->words++;
11641 cum->floats_in_gpr = 0;
11644 if (cum->intoffset == -1)
11645 return;
11647 intoffset = cum->intoffset;
11648 cum->intoffset = -1;
11649 cum->floats_in_gpr = 0;
11651 if (intoffset % BITS_PER_WORD != 0)
11653 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11654 if (!int_mode_for_size (bits, 0).exists ())
11656 /* We couldn't find an appropriate mode, which happens,
11657 e.g., in packed structs when there are 3 bytes to load.
11658 Back intoffset back to the beginning of the word in this
11659 case. */
11660 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11664 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11665 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11666 intregs = (endbit - startbit) / BITS_PER_WORD;
11667 cum->words += intregs;
11668 /* words should be unsigned. */
11669 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11671 int pad = (endbit/BITS_PER_WORD) - cum->words;
11672 cum->words += pad;
11676 /* The darwin64 ABI calls for us to recurse down through structs,
11677 looking for elements passed in registers. Unfortunately, we have
11678 to track int register count here also because of misalignments
11679 in powerpc alignment mode. */
11681 static void
11682 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11683 const_tree type,
11684 HOST_WIDE_INT startbitpos)
11686 tree f;
11688 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11689 if (TREE_CODE (f) == FIELD_DECL)
11691 HOST_WIDE_INT bitpos = startbitpos;
11692 tree ftype = TREE_TYPE (f);
11693 machine_mode mode;
11694 if (ftype == error_mark_node)
11695 continue;
11696 mode = TYPE_MODE (ftype);
11698 if (DECL_SIZE (f) != 0
11699 && tree_fits_uhwi_p (bit_position (f)))
11700 bitpos += int_bit_position (f);
11702 /* ??? FIXME: else assume zero offset. */
11704 if (TREE_CODE (ftype) == RECORD_TYPE)
11705 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11706 else if (USE_FP_FOR_ARG_P (cum, mode))
11708 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11709 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11710 cum->fregno += n_fpregs;
11711 /* Single-precision floats present a special problem for
11712 us, because they are smaller than an 8-byte GPR, and so
11713 the structure-packing rules combined with the standard
11714 varargs behavior mean that we want to pack float/float
11715 and float/int combinations into a single register's
11716 space. This is complicated by the arg advance flushing,
11717 which works on arbitrarily large groups of int-type
11718 fields. */
11719 if (mode == SFmode)
11721 if (cum->floats_in_gpr == 1)
11723 /* Two floats in a word; count the word and reset
11724 the float count. */
11725 cum->words++;
11726 cum->floats_in_gpr = 0;
11728 else if (bitpos % 64 == 0)
11730 /* A float at the beginning of an 8-byte word;
11731 count it and put off adjusting cum->words until
11732 we see if an arg advance flush is going to do it
11733 for us. */
11734 cum->floats_in_gpr++;
11736 else
11738 /* The float is at the end of a word, preceded
11739 by integer fields, so the arg advance flush
11740 just above has already set cum->words and
11741 everything is taken care of. */
11744 else
11745 cum->words += n_fpregs;
11747 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11749 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11750 cum->vregno++;
11751 cum->words += 2;
11753 else if (cum->intoffset == -1)
11754 cum->intoffset = bitpos;
11758 /* Check for an item that needs to be considered specially under the Darwin
11759 64-bit ABI. These are record types where the mode is BLK or the structure is
11760 8 bytes in size. */
11761 static int
11762 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11764 return rs6000_darwin64_abi
11765 && ((mode == BLKmode
11766 && TREE_CODE (type) == RECORD_TYPE
11767 && int_size_in_bytes (type) > 0)
11768 || (type && TREE_CODE (type) == RECORD_TYPE
11769 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11772 /* Update the data in CUM to advance over an argument
11773 of mode MODE and data type TYPE.
11774 (TYPE is null for libcalls where that information may not be available.)
11776 Note that for args passed by reference, function_arg will be called
11777 with MODE and TYPE set to that of the pointer to the arg, not the arg
11778 itself. */
11780 static void
11781 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11782 const_tree type, bool named, int depth)
11784 machine_mode elt_mode;
11785 int n_elts;
11787 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11789 /* Only tick off an argument if we're not recursing. */
11790 if (depth == 0)
11791 cum->nargs_prototype--;
11793 #ifdef HAVE_AS_GNU_ATTRIBUTE
11794 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11795 && cum->escapes)
11797 if (SCALAR_FLOAT_MODE_P (mode))
11799 rs6000_passes_float = true;
11800 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11801 && (FLOAT128_IBM_P (mode)
11802 || FLOAT128_IEEE_P (mode)
11803 || (type != NULL
11804 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11805 rs6000_passes_long_double = true;
11807 if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11808 || (PAIRED_VECTOR_MODE (mode)
11809 && !cum->stdarg
11810 && cum->sysv_gregno <= GP_ARG_MAX_REG))
11811 rs6000_passes_vector = true;
11813 #endif
11815 if (TARGET_ALTIVEC_ABI
11816 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11817 || (type && TREE_CODE (type) == VECTOR_TYPE
11818 && int_size_in_bytes (type) == 16)))
11820 bool stack = false;
11822 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11824 cum->vregno += n_elts;
11826 if (!TARGET_ALTIVEC)
11827 error ("cannot pass argument in vector register because"
11828 " altivec instructions are disabled, use %qs"
11829 " to enable them", "-maltivec");
11831 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11832 even if it is going to be passed in a vector register.
11833 Darwin does the same for variable-argument functions. */
11834 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11835 && TARGET_64BIT)
11836 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11837 stack = true;
11839 else
11840 stack = true;
11842 if (stack)
11844 int align;
11846 /* Vector parameters must be 16-byte aligned. In 32-bit
11847 mode this means we need to take into account the offset
11848 to the parameter save area. In 64-bit mode, they just
11849 have to start on an even word, since the parameter save
11850 area is 16-byte aligned. */
11851 if (TARGET_32BIT)
11852 align = -(rs6000_parm_offset () + cum->words) & 3;
11853 else
11854 align = cum->words & 1;
11855 cum->words += align + rs6000_arg_size (mode, type);
11857 if (TARGET_DEBUG_ARG)
11859 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11860 cum->words, align);
11861 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11862 cum->nargs_prototype, cum->prototype,
11863 GET_MODE_NAME (mode));
11867 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11869 int size = int_size_in_bytes (type);
11870 /* Variable sized types have size == -1 and are
11871 treated as if consisting entirely of ints.
11872 Pad to a 16-byte boundary if needed. */
11873 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11874 && (cum->words % 2) != 0)
11875 cum->words++;
11876 /* For varargs, we can just go up by the size of the struct. */
11877 if (!named)
11878 cum->words += (size + 7) / 8;
11879 else
11881 /* It is tempting to say int register count just goes up by
11882 sizeof(type)/8, but this is wrong in a case such as
11883 { int; double; int; } [powerpc alignment]. We have to
11884 grovel through the fields for these too. */
11885 cum->intoffset = 0;
11886 cum->floats_in_gpr = 0;
11887 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11888 rs6000_darwin64_record_arg_advance_flush (cum,
11889 size * BITS_PER_UNIT, 1);
11891 if (TARGET_DEBUG_ARG)
11893 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11894 cum->words, TYPE_ALIGN (type), size);
11895 fprintf (stderr,
11896 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11897 cum->nargs_prototype, cum->prototype,
11898 GET_MODE_NAME (mode));
11901 else if (DEFAULT_ABI == ABI_V4)
11903 if (abi_v4_pass_in_fpr (mode))
11905 /* _Decimal128 must use an even/odd register pair. This assumes
11906 that the register number is odd when fregno is odd. */
11907 if (mode == TDmode && (cum->fregno % 2) == 1)
11908 cum->fregno++;
11910 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11911 <= FP_ARG_V4_MAX_REG)
11912 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11913 else
11915 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11916 if (mode == DFmode || FLOAT128_IBM_P (mode)
11917 || mode == DDmode || mode == TDmode)
11918 cum->words += cum->words & 1;
11919 cum->words += rs6000_arg_size (mode, type);
11922 else
11924 int n_words = rs6000_arg_size (mode, type);
11925 int gregno = cum->sysv_gregno;
11927 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11928 As does any other 2 word item such as complex int due to a
11929 historical mistake. */
11930 if (n_words == 2)
11931 gregno += (1 - gregno) & 1;
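/* I.e. round GREGNO up to an odd register number: a 2-word item
   reaching r4 is bumped to start at r5, while one already at r3
   stays put.  */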
11933 /* Multi-reg args are not split between registers and stack. */
11934 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11936 /* Long long is aligned on the stack. So are other 2 word
11937 items such as complex int due to a historical mistake. */
11938 if (n_words == 2)
11939 cum->words += cum->words & 1;
11940 cum->words += n_words;
11943 /* Note: we continue to accumulate gregno even after we've started
11944 spilling to the stack, so that expand_builtin_saveregs can tell
11945 that spilling has started. */
11946 cum->sysv_gregno = gregno + n_words;
11949 if (TARGET_DEBUG_ARG)
11951 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11952 cum->words, cum->fregno);
11953 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11954 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11955 fprintf (stderr, "mode = %4s, named = %d\n",
11956 GET_MODE_NAME (mode), named);
11959 else
11961 int n_words = rs6000_arg_size (mode, type);
11962 int start_words = cum->words;
11963 int align_words = rs6000_parm_start (mode, type, start_words);
11965 cum->words = align_words + n_words;
11967 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11969 /* _Decimal128 must be passed in an even/odd float register pair.
11970 This assumes that the register number is odd when fregno is
11971 odd. */
11972 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11973 cum->fregno++;
11974 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11977 if (TARGET_DEBUG_ARG)
11979 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11980 cum->words, cum->fregno);
11981 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11982 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11983 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11984 named, align_words - start_words, depth);
11989 static void
11990 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11991 const_tree type, bool named)
11993 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11997 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11998 structure between cum->intoffset and bitpos to integer registers. */
12000 static void
12001 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
12002 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
12004 machine_mode mode;
12005 unsigned int regno;
12006 unsigned int startbit, endbit;
12007 int this_regno, intregs, intoffset;
12008 rtx reg;
12010 if (cum->intoffset == -1)
12011 return;
12013 intoffset = cum->intoffset;
12014 cum->intoffset = -1;
12016 /* If this is the trailing part of a word, try to only load that
12017 much into the register. Otherwise load the whole register. Note
12018 that in the latter case we may pick up unwanted bits. It's not a
12019 problem at the moment, but we may wish to revisit this. */
12021 if (intoffset % BITS_PER_WORD != 0)
12023 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
12024 if (!int_mode_for_size (bits, 0).exists (&mode))
12026 /* We couldn't find an appropriate mode, which happens,
12027 e.g., in packed structs when there are 3 bytes to load.
12028 Back intoffset back to the beginning of the word in this
12029 case. */
12030 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
12031 mode = word_mode;
12034 else
12035 mode = word_mode;
12037 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
12038 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
12039 intregs = (endbit - startbit) / BITS_PER_WORD;
12040 this_regno = cum->words + intoffset / BITS_PER_WORD;
12042 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
12043 cum->use_stack = 1;
12045 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
12046 if (intregs <= 0)
12047 return;
12049 intoffset /= BITS_PER_UNIT;
do
12052 regno = GP_ARG_MIN_REG + this_regno;
12053 reg = gen_rtx_REG (mode, regno);
12054 rvec[(*k)++] =
12055 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
12057 this_regno += 1;
12058 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
12059 mode = word_mode;
12060 intregs -= 1;
12062 while (intregs > 0);
12065 /* Recursive workhorse for the following. */
12067 static void
12068 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
12069 HOST_WIDE_INT startbitpos, rtx rvec[],
12070 int *k)
12072 tree f;
12074 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
12075 if (TREE_CODE (f) == FIELD_DECL)
12077 HOST_WIDE_INT bitpos = startbitpos;
12078 tree ftype = TREE_TYPE (f);
12079 machine_mode mode;
12080 if (ftype == error_mark_node)
12081 continue;
12082 mode = TYPE_MODE (ftype);
12084 if (DECL_SIZE (f) != 0
12085 && tree_fits_uhwi_p (bit_position (f)))
12086 bitpos += int_bit_position (f);
12088 /* ??? FIXME: else assume zero offset. */
12090 if (TREE_CODE (ftype) == RECORD_TYPE)
12091 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
12092 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
12094 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
12095 #if 0
12096 switch (mode)
12098 case E_SCmode: mode = SFmode; break;
12099 case E_DCmode: mode = DFmode; break;
12100 case E_TCmode: mode = TFmode; break;
12101 default: break;
12103 #endif
12104 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12105 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
12107 gcc_assert (cum->fregno == FP_ARG_MAX_REG
12108 && (mode == TFmode || mode == TDmode));
12109 /* Long double or _Decimal128 split over regs and memory. */
12110 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
12111 cum->use_stack = 1;
12113 rvec[(*k)++]
12114 = gen_rtx_EXPR_LIST (VOIDmode,
12115 gen_rtx_REG (mode, cum->fregno++),
12116 GEN_INT (bitpos / BITS_PER_UNIT));
12117 if (FLOAT128_2REG_P (mode))
12118 cum->fregno++;
12120 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12122 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12123 rvec[(*k)++]
12124 = gen_rtx_EXPR_LIST (VOIDmode,
12125 gen_rtx_REG (mode, cum->vregno++),
12126 GEN_INT (bitpos / BITS_PER_UNIT));
12128 else if (cum->intoffset == -1)
12129 cum->intoffset = bitpos;
12133 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
12134 the register(s) to be used for each field and subfield of a struct
12135 being passed by value, along with the offset of where the
12136 register's value may be found in the block. FP fields go in FP
12137 register, vector fields go in vector registers, and everything
12138 else goes in int registers, packed as in memory.
12140 This code is also used for function return values. RETVAL indicates
12141 whether this is the case.
12143 Much of this is taken from the SPARC V9 port, which has a similar
12144 calling convention. */
12146 static rtx
12147 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
12148 bool named, bool retval)
12150 rtx rvec[FIRST_PSEUDO_REGISTER];
12151 int k = 1, kbase = 1;
12152 HOST_WIDE_INT typesize = int_size_in_bytes (type);
12153 /* This is a copy; modifications are not visible to our caller. */
12154 CUMULATIVE_ARGS copy_cum = *orig_cum;
12155 CUMULATIVE_ARGS *cum = &copy_cum;
12157 /* Pad to a 16-byte boundary if needed. */
12158 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12159 && (cum->words % 2) != 0)
12160 cum->words++;
12162 cum->intoffset = 0;
12163 cum->use_stack = 0;
12164 cum->named = named;
12166 /* Put entries into rvec[] for individual FP and vector fields, and
12167 for the chunks of memory that go in int regs. Note we start at
12168 element 1; 0 is reserved for an indication of using memory, and
12169 may or may not be filled in below. */
12170 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
12171 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
12173 /* If any part of the struct went on the stack put all of it there.
12174 This hack is because the generic code for
12175 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
12176 parts of the struct are not at the beginning. */
12177 if (cum->use_stack)
12179 if (retval)
12180 return NULL_RTX; /* doesn't go in registers at all */
12181 kbase = 0;
12182 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12184 if (k > 1 || cum->use_stack)
12185 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
12186 else
12187 return NULL_RTX;
12190 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
12192 static rtx
12193 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
12194 int align_words)
12196 int n_units;
12197 int i, k;
12198 rtx rvec[GP_ARG_NUM_REG + 1];
12200 if (align_words >= GP_ARG_NUM_REG)
12201 return NULL_RTX;
12203 n_units = rs6000_arg_size (mode, type);
12205 /* Optimize the simple case where the arg fits in one gpr, except in
12206 the case of BLKmode due to assign_parms assuming that registers are
12207 BITS_PER_WORD wide. */
12208 if (n_units == 0
12209 || (n_units == 1 && mode != BLKmode))
12210 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12212 k = 0;
12213 if (align_words + n_units > GP_ARG_NUM_REG)
12214 /* Not all of the arg fits in gprs. Say that it goes in memory too,
12215 using a magic NULL_RTX component.
12216 This is not strictly correct. Only some of the arg belongs in
12217 memory, not all of it. However, the normal scheme using
12218 function_arg_partial_nregs can result in unusual subregs, eg.
12219 (subreg:SI (reg:DF) 4), which are not handled well. The code to
12220 store the whole arg to memory is often more efficient than code
12221 to store pieces, and we know that space is available in the right
12222 place for the whole arg. */
12223 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12225 i = 0;
do
12228 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
12229 rtx off = GEN_INT (i++ * 4);
12230 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12232 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
12234 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
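/* For instance, a DImode argument with ALIGN_WORDS == 7 occupies two
   units but only r10 is left (GP_ARG_NUM_REG == 8 covers r3..r10), so
   the result is a PARALLEL of the magic NULL_RTX memory element plus
   (reg:SI r10) for the word at offset 0; the remaining word lives on
   the stack.  */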
12237 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
12238 but must also be copied into the parameter save area starting at
12239 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
12240 to the GPRs and/or memory. Return the number of elements used. */
12242 static int
12243 rs6000_psave_function_arg (machine_mode mode, const_tree type,
12244 int align_words, rtx *rvec)
12246 int k = 0;
12248 if (align_words < GP_ARG_NUM_REG)
12250 int n_words = rs6000_arg_size (mode, type);
12252 if (align_words + n_words > GP_ARG_NUM_REG
12253 || mode == BLKmode
12254 || (TARGET_32BIT && TARGET_POWERPC64))
12256 /* If this is partially on the stack, then we only
12257 include the portion actually in registers here. */
12258 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12259 int i = 0;
12261 if (align_words + n_words > GP_ARG_NUM_REG)
12263 /* Not all of the arg fits in gprs. Say that it goes in memory
12264 too, using a magic NULL_RTX component. Also see comment in
12265 rs6000_mixed_function_arg for why the normal
12266 function_arg_partial_nregs scheme doesn't work in this case. */
12267 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
do
12272 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12273 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12274 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12276 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12278 else
12280 /* The whole arg fits in gprs. */
12281 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12282 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12285 else
12287 /* It's entirely in memory. */
12288 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12291 return k;
12294 /* RVEC is a vector of K components of an argument of mode MODE.
12295 Construct the final function_arg return value from it. */
12297 static rtx
12298 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12300 gcc_assert (k >= 1);
12302 /* Avoid returning a PARALLEL in the trivial cases. */
12303 if (k == 1)
12305 if (XEXP (rvec[0], 0) == NULL_RTX)
12306 return NULL_RTX;
12308 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12309 return XEXP (rvec[0], 0);
12312 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12315 /* Determine where to put an argument to a function.
12316 Value is zero to push the argument on the stack,
12317 or a hard register in which to store the argument.
12319 MODE is the argument's machine mode.
12320 TYPE is the data type of the argument (as a tree).
12321 This is null for libcalls where that information may
12322 not be available.
12323 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12324 the preceding args and about the function being called. It is
12325 not modified in this routine.
12326 NAMED is nonzero if this argument is a named parameter
12327 (otherwise it is an extra parameter matching an ellipsis).
12329 On RS/6000 the first eight words of non-FP are normally in registers
12330 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12331 Under V.4, the first 8 FP args are in registers.
12333 If this is floating-point and no prototype is specified, we use
12334 both an FP and integer register (or possibly FP reg and stack). Library
12335 functions (when CALL_LIBCALL is set) always have the proper types for args,
12336 so we can pass the FP value just in one register. emit_library_call
12337 doesn't support PARALLEL anyway.
12339 Note that for args passed by reference, function_arg will be called
12340 with MODE and TYPE set to that of the pointer to the arg, not the arg
12341 itself. */
12343 static rtx
12344 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12345 const_tree type, bool named)
12347 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12348 enum rs6000_abi abi = DEFAULT_ABI;
12349 machine_mode elt_mode;
12350 int n_elts;
12352 /* Return a marker to indicate whether CR1 needs to set or clear the
12353 bit that V.4 uses to say fp args were passed in registers.
12354 Assume that we don't need the marker for software floating point,
12355 or compiler generated library calls. */
12356 if (mode == VOIDmode)
12358 if (abi == ABI_V4
12359 && (cum->call_cookie & CALL_LIBCALL) == 0
12360 && (cum->stdarg
12361 || (cum->nargs_prototype < 0
12362 && (cum->prototype || TARGET_NO_PROTOTYPE)))
12363 && TARGET_HARD_FLOAT)
12364 return GEN_INT (cum->call_cookie
12365 | ((cum->fregno == FP_ARG_MIN_REG)
12366 ? CALL_V4_SET_FP_ARGS
12367 : CALL_V4_CLEAR_FP_ARGS));
12369 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12372 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12374 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12376 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12377 if (rslt != NULL_RTX)
12378 return rslt;
12379 /* Else fall through to usual handling. */
12382 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12384 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12385 rtx r, off;
12386 int i, k = 0;
12388 /* Do we also need to pass this argument in the parameter save area?
12389 Library support functions for IEEE 128-bit are assumed to not need the
12390 value passed both in GPRs and in vector registers. */
12391 if (TARGET_64BIT && !cum->prototype
12392 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12394 int align_words = ROUND_UP (cum->words, 2);
12395 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12398 /* Describe where this argument goes in the vector registers. */
12399 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
12401 r = gen_rtx_REG (elt_mode, cum->vregno + i);
12402 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12403 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12406 return rs6000_finish_function_arg (mode, rvec, k);
12408 else if (TARGET_ALTIVEC_ABI
12409 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
12410 || (type && TREE_CODE (type) == VECTOR_TYPE
12411 && int_size_in_bytes (type) == 16)))
12413 if (named || abi == ABI_V4)
12414 return NULL_RTX;
12415 else
12417 /* Vector parameters to varargs functions under AIX or Darwin
12418 get passed in memory and possibly also in GPRs. */
12419 int align, align_words, n_words;
12420 machine_mode part_mode;
12422 /* Vector parameters must be 16-byte aligned. In 32-bit
12423 mode this means we need to take into account the offset
12424 to the parameter save area. In 64-bit mode, they just
12425 have to start on an even word, since the parameter save
12426 area is 16-byte aligned. */
12427 if (TARGET_32BIT)
12428 align = -(rs6000_parm_offset () + cum->words) & 3;
12429 else
12430 align = cum->words & 1;
12431 align_words = cum->words + align;
12433 /* Out of registers? Memory, then. */
12434 if (align_words >= GP_ARG_NUM_REG)
12435 return NULL_RTX;
12437 if (TARGET_32BIT && TARGET_POWERPC64)
12438 return rs6000_mixed_function_arg (mode, type, align_words);
12440 /* The vector value goes in GPRs. Only the part of the
12441 value in GPRs is reported here. */
12442 part_mode = mode;
12443 n_words = rs6000_arg_size (mode, type);
12444 if (align_words + n_words > GP_ARG_NUM_REG)
12445 /* Fortunately, there are only two possibilities, the value
12446 is either wholly in GPRs or half in GPRs and half not. */
12447 part_mode = DImode;
12449 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
12453 else if (abi == ABI_V4)
12455 if (abi_v4_pass_in_fpr (mode))
12457 /* _Decimal128 must use an even/odd register pair. This assumes
12458 that the register number is odd when fregno is odd. */
12459 if (mode == TDmode && (cum->fregno % 2) == 1)
12460 cum->fregno++;
12462 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12463 <= FP_ARG_V4_MAX_REG)
12464 return gen_rtx_REG (mode, cum->fregno);
12465 else
12466 return NULL_RTX;
12468 else
12470 int n_words = rs6000_arg_size (mode, type);
12471 int gregno = cum->sysv_gregno;
12473 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
12474 As does any other 2 word item such as complex int due to a
12475 historical mistake. */
12476 if (n_words == 2)
12477 gregno += (1 - gregno) & 1;
12479 /* Multi-reg args are not split between registers and stack. */
12480 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12481 return NULL_RTX;
12483 if (TARGET_32BIT && TARGET_POWERPC64)
12484 return rs6000_mixed_function_arg (mode, type,
12485 gregno - GP_ARG_MIN_REG);
12486 return gen_rtx_REG (mode, gregno);
12489 else
12491 int align_words = rs6000_parm_start (mode, type, cum->words);
12493 /* _Decimal128 must be passed in an even/odd float register pair.
12494 This assumes that the register number is odd when fregno is odd. */
12495 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12496 cum->fregno++;
12498 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12500 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12501 rtx r, off;
12502 int i, k = 0;
12503 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12504 int fpr_words;
12506 /* Do we also need to pass this argument in the parameter
12507 save area? */
12508 if (type && (cum->nargs_prototype <= 0
12509 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12510 && TARGET_XL_COMPAT
12511 && align_words >= GP_ARG_NUM_REG)))
12512 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12514 /* Describe where this argument goes in the fprs. */
12515 for (i = 0; i < n_elts
12516 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12518 /* Check if the argument is split over registers and memory.
12519 This can only ever happen for long double or _Decimal128;
12520 complex types are handled via split_complex_arg. */
12521 machine_mode fmode = elt_mode;
12522 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12524 gcc_assert (FLOAT128_2REG_P (fmode));
12525 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12528 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12529 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12530 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12533 /* If there were not enough FPRs to hold the argument, the rest
12534 usually goes into memory. However, if the current position
12535 is still within the register parameter area, a portion may
12536 actually have to go into GPRs.
12538 Note that it may happen that the portion of the argument
12539 passed in the first "half" of the first GPR was already
12540 passed in the last FPR as well.
12542 For unnamed arguments, we already set up GPRs to cover the
12543 whole argument in rs6000_psave_function_arg, so there is
12544 nothing further to do at this point. */
12545 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12546 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12547 && cum->nargs_prototype > 0)
12549 static bool warned;
12551 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12552 int n_words = rs6000_arg_size (mode, type);
12554 align_words += fpr_words;
12555 n_words -= fpr_words;
do
12559 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12560 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12561 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12563 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12565 if (!warned && warn_psabi)
12567 warned = true;
12568 inform (input_location,
12569 "the ABI of passing homogeneous float aggregates"
12570 " has changed in GCC 5");
12574 return rs6000_finish_function_arg (mode, rvec, k);
12576 else if (align_words < GP_ARG_NUM_REG)
12578 if (TARGET_32BIT && TARGET_POWERPC64)
12579 return rs6000_mixed_function_arg (mode, type, align_words);
12581 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12583 else
12584 return NULL_RTX;
12588 /* For an arg passed partly in registers and partly in memory, this is
12589 the number of bytes passed in registers. For args passed entirely in
12590 registers or entirely in memory, zero. When an arg is described by a
12591 PARALLEL, perhaps using more than one register type, this function
12592 returns the number of bytes used by the first element of the PARALLEL. */
12594 static int
12595 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12596 tree type, bool named)
12598 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12599 bool passed_in_gprs = true;
12600 int ret = 0;
12601 int align_words;
12602 machine_mode elt_mode;
12603 int n_elts;
12605 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12607 if (DEFAULT_ABI == ABI_V4)
12608 return 0;
12610 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12612 /* If we are passing this arg in the fixed parameter save area (gprs or
12613 memory) as well as VRs, we do not use the partial bytes mechanism;
12614 instead, rs6000_function_arg will return a PARALLEL including a memory
12615 element as necessary. Library support functions for IEEE 128-bit are
12616 assumed to not need the value passed both in GPRs and in vector
12617 registers. */
12618 if (TARGET_64BIT && !cum->prototype
12619 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12620 return 0;
12622 /* Otherwise, we pass in VRs only. Check for partial copies. */
12623 passed_in_gprs = false;
12624 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12625 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12628 /* In this complicated case we just disable the partial_nregs code. */
12629 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12630 return 0;
12632 align_words = rs6000_parm_start (mode, type, cum->words);
12634 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12636 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12638 /* If we are passing this arg in the fixed parameter save area
12639 (gprs or memory) as well as FPRs, we do not use the partial
12640 bytes mechanism; instead, rs6000_function_arg will return a
12641 PARALLEL including a memory element as necessary. */
12642 if (type
12643 && (cum->nargs_prototype <= 0
12644 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12645 && TARGET_XL_COMPAT
12646 && align_words >= GP_ARG_NUM_REG)))
12647 return 0;
12649 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12650 passed_in_gprs = false;
12651 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12653 /* Compute number of bytes / words passed in FPRs. If there
12654 is still space available in the register parameter area
12655 *after* that amount, a part of the argument will be passed
12656 in GPRs. In that case, the total amount passed in any
12657 registers is equal to the amount that would have been passed
12658 in GPRs if everything were passed there, so we fall back to
12659 the GPR code below to compute the appropriate value. */
12660 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12661 * MIN (8, GET_MODE_SIZE (elt_mode)));
12662 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12664 if (align_words + fpr_words < GP_ARG_NUM_REG)
12665 passed_in_gprs = true;
12666 else
12667 ret = fpr;
12671 if (passed_in_gprs
12672 && align_words < GP_ARG_NUM_REG
12673 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12674 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12676 if (ret != 0 && TARGET_DEBUG_ARG)
12677 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12679 return ret;
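/* Example: a 16-byte struct passed in GPRs starting at align_words ==
   7 on a 64-bit AIX/ELFv2 target needs two words but only r10 remains,
   so this returns (8 - 7) * 8 == 8: eight bytes in registers, the rest
   in memory.  */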
12682 /* A C expression that indicates when an argument must be passed by
12683 reference. If nonzero for an argument, a copy of that argument is
12684 made in memory and a pointer to the argument is passed instead of
12685 the argument itself. The pointer is passed in whatever way is
12686 appropriate for passing a pointer to that type.
12688 Under V.4, aggregates and long double are passed by reference.
12690 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12691 reference unless the AltiVec vector extension ABI is in force.
12693 As an extension to all ABIs, variable sized types are passed by
12694 reference. */
12696 static bool
12697 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12698 machine_mode mode, const_tree type,
12699 bool named ATTRIBUTE_UNUSED)
12701 if (!type)
12702 return 0;
12704 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12705 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12707 if (TARGET_DEBUG_ARG)
12708 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12709 return 1;
12712 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12714 if (TARGET_DEBUG_ARG)
12715 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12716 return 1;
12719 if (int_size_in_bytes (type) < 0)
12721 if (TARGET_DEBUG_ARG)
12722 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12723 return 1;
12726 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12727 modes only exist for GCC vector types if -maltivec. */
12728 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12730 if (TARGET_DEBUG_ARG)
12731 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12732 return 1;
12735 /* Pass synthetic vectors in memory. */
12736 if (TREE_CODE (type) == VECTOR_TYPE
12737 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12739 static bool warned_for_pass_big_vectors = false;
12740 if (TARGET_DEBUG_ARG)
12741 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12742 if (!warned_for_pass_big_vectors)
12744 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12745 "non-standard ABI extension with no compatibility "
12746 "guarantee");
12747 warned_for_pass_big_vectors = true;
12749 return 1;
12752 return 0;
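/* Illustrative sketch of what "pass by reference" means here
   (hypothetical user code, not part of the compiler).  Under the V.4
   ABI a call such as

       struct big { char b[64]; } x;
       f (x);

   is lowered roughly as if the caller had written

       struct big tmp = x;    -- hidden copy made in the caller
       f (&tmp);              -- the pointer is passed like any pointer

   so the callee never sees the original object.  */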
12755 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12756 already processed. Return true if the parameter must be passed
12757 (fully or partially) on the stack. */
12759 static bool
12760 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12762 machine_mode mode;
12763 int unsignedp;
12764 rtx entry_parm;
12766 /* Catch errors. */
12767 if (type == NULL || type == error_mark_node)
12768 return true;
12770 /* Handle types with no storage requirement. */
12771 if (TYPE_MODE (type) == VOIDmode)
12772 return false;
12774 /* Handle complex types: check both parts; each call below also advances ARGS_SO_FAR past one scalar part. */
12775 if (TREE_CODE (type) == COMPLEX_TYPE)
12776 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12777 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12779 /* Handle transparent aggregates. */
12780 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12781 && TYPE_TRANSPARENT_AGGR (type))
12782 type = TREE_TYPE (first_field (type));
12784 /* See if this arg was passed by invisible reference. */
12785 if (pass_by_reference (get_cumulative_args (args_so_far),
12786 TYPE_MODE (type), type, true))
12787 type = build_pointer_type (type);
12789 /* Find mode as it is passed by the ABI. */
12790 unsignedp = TYPE_UNSIGNED (type);
12791 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12793 /* If we must pass in stack, we need a stack. */
12794 if (rs6000_must_pass_in_stack (mode, type))
12795 return true;
12797 /* If there is no incoming register, we need a stack. */
12798 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12799 if (entry_parm == NULL)
12800 return true;
12802 /* Likewise if we need to pass both in registers and on the stack. */
12803 if (GET_CODE (entry_parm) == PARALLEL
12804 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12805 return true;
12807 /* Also true if we're partially in registers and partially not. */
12808 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12809 return true;
12811 /* Update info on where next arg arrives in registers. */
12812 rs6000_function_arg_advance (args_so_far, mode, type, true);
12813 return false;
12816 /* Return true if FUN has no prototype, has a variable argument
12817 list, or passes any parameter in memory. */
12819 static bool
12820 rs6000_function_parms_need_stack (tree fun, bool incoming)
12822 tree fntype, result;
12823 CUMULATIVE_ARGS args_so_far_v;
12824 cumulative_args_t args_so_far;
12826 if (!fun)
12827 /* Must be a libcall, all of which only use reg parms. */
12828 return false;
12830 fntype = fun;
12831 if (!TYPE_P (fun))
12832 fntype = TREE_TYPE (fun);
12834 /* Varargs functions need the parameter save area. */
12835 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12836 return true;
12838 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12839 args_so_far = pack_cumulative_args (&args_so_far_v);
12841 /* When incoming, we will have been passed the function decl.
12842 It is necessary to use the decl to handle K&R style functions,
12843 where TYPE_ARG_TYPES may not be available. */
12844 if (incoming)
12846 gcc_assert (DECL_P (fun));
12847 result = DECL_RESULT (fun);
12849 else
12850 result = TREE_TYPE (fntype);
12852 if (result && aggregate_value_p (result, fntype))
12854 if (!TYPE_P (result))
12855 result = TREE_TYPE (result);
12856 result = build_pointer_type (result);
12857 rs6000_parm_needs_stack (args_so_far, result);
12860 if (incoming)
12862 tree parm;
12864 for (parm = DECL_ARGUMENTS (fun);
12865 parm && parm != void_list_node;
12866 parm = TREE_CHAIN (parm))
12867 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12868 return true;
12870 else
12872 function_args_iterator args_iter;
12873 tree arg_type;
12875 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12876 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12877 return true;
12880 return false;
12883 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12884 usually a constant depending on the ABI. However, in the ELFv2 ABI
12885 the register parameter area is optional when calling a function that
12886 has a prototype in scope, has no variable argument list, and passes
12887 all parameters in registers. */
12889 int
12890 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12892 int reg_parm_stack_space;
12894 switch (DEFAULT_ABI)
12896 default:
12897 reg_parm_stack_space = 0;
12898 break;
12900 case ABI_AIX:
12901 case ABI_DARWIN:
12902 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12903 break;
12905 case ABI_ELFv2:
12906 /* ??? Recomputing this every time is a bit expensive. Is there
12907 a place to cache this information? */
12908 if (rs6000_function_parms_need_stack (fun, incoming))
12909 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12910 else
12911 reg_parm_stack_space = 0;
12912 break;
12915 return reg_parm_stack_space;
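/* Illustrative sketch (hypothetical prototypes, not part of the
   compiler): under the ELFv2 ABI,

       int f (int, double);   -- prototyped, args fit in r3/f1:
                              -- reg_parm_stack_space == 0
       int g (int, ...);      -- varargs: the full 64-byte save area
                              -- is required again

   whereas AIX and Darwin always reserve the 32- or 64-byte area.  */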
12918 static void
12919 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12921 int i;
12922 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12924 if (nregs == 0)
12925 return;
12927 for (i = 0; i < nregs; i++)
12929 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12930 if (reload_completed)
12932 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12933 tem = NULL_RTX;
12934 else
12935 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12936 i * GET_MODE_SIZE (reg_mode));
12938 else
12939 tem = replace_equiv_address (tem, XEXP (tem, 0));
12941 gcc_assert (tem);
12943 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12947 /* Perform any actions needed for a function that is receiving a
12948 variable number of arguments.
12950 CUM is as above.
12952 MODE and TYPE are the mode and type of the current parameter.
12954 PRETEND_SIZE is a variable that should be set to the amount of stack
12955 that must be pushed by the prolog to pretend that our caller pushed
12956 it.
12958 Normally, this macro will push all remaining incoming registers on the
12959 stack and set PRETEND_SIZE to the length of the registers pushed. */
12961 static void
12962 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12963 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12964 int no_rtl)
12966 CUMULATIVE_ARGS next_cum;
12967 int reg_size = TARGET_32BIT ? 4 : 8;
12968 rtx save_area = NULL_RTX, mem;
12969 int first_reg_offset;
12970 alias_set_type set;
12972 /* Skip the last named argument. */
12973 next_cum = *get_cumulative_args (cum);
12974 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12976 if (DEFAULT_ABI == ABI_V4)
12978 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12980 if (! no_rtl)
12982 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12983 HOST_WIDE_INT offset = 0;
12985 /* Try to optimize the size of the varargs save area.
12986 The ABI requires that ap.reg_save_area is doubleword
12987 aligned, but we don't need to allocate space for all
12988 the bytes, only those into which we will actually save
12989 anything. */
12990 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12991 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12992 if (TARGET_HARD_FLOAT
12993 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12994 && cfun->va_list_fpr_size)
12996 if (gpr_reg_num)
12997 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12998 * UNITS_PER_FP_WORD;
12999 if (cfun->va_list_fpr_size
13000 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13001 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
13002 else
13003 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13004 * UNITS_PER_FP_WORD;
13006 if (gpr_reg_num)
13008 offset = -((first_reg_offset * reg_size) & ~7);
13009 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
13011 gpr_reg_num = cfun->va_list_gpr_size;
13012 if (reg_size == 4 && (first_reg_offset & 1))
13013 gpr_reg_num++;
13015 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
13017 else if (fpr_size)
13018 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
13019 * UNITS_PER_FP_WORD
13020 - (int) (GP_ARG_NUM_REG * reg_size);
13022 if (gpr_size + fpr_size)
13024 rtx reg_save_area
13025 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
13026 gcc_assert (GET_CODE (reg_save_area) == MEM);
13027 reg_save_area = XEXP (reg_save_area, 0);
13028 if (GET_CODE (reg_save_area) == PLUS)
13030 gcc_assert (XEXP (reg_save_area, 0)
13031 == virtual_stack_vars_rtx);
13032 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
13033 offset += INTVAL (XEXP (reg_save_area, 1));
13035 else
13036 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
13039 cfun->machine->varargs_save_offset = offset;
13040 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
13043 else
13045 first_reg_offset = next_cum.words;
13046 save_area = crtl->args.internal_arg_pointer;
13048 if (targetm.calls.must_pass_in_stack (mode, type))
13049 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
13052 set = get_varargs_alias_set ();
13053 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
13054 && cfun->va_list_gpr_size)
13056 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
13058 if (va_list_gpr_counter_field)
13059 /* V4 va_list_gpr_size counts number of registers needed. */
13060 n_gpr = cfun->va_list_gpr_size;
13061 else
13062 /* char * va_list instead counts number of bytes needed. */
13063 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
13065 if (nregs > n_gpr)
13066 nregs = n_gpr;
13068 mem = gen_rtx_MEM (BLKmode,
13069 plus_constant (Pmode, save_area,
13070 first_reg_offset * reg_size));
13071 MEM_NOTRAP_P (mem) = 1;
13072 set_mem_alias_set (mem, set);
13073 set_mem_align (mem, BITS_PER_WORD);
13075 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
13076 nregs);
13079 /* Save FP registers if needed. */
13080 if (DEFAULT_ABI == ABI_V4
13081 && TARGET_HARD_FLOAT
13082 && ! no_rtl
13083 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13084 && cfun->va_list_fpr_size)
13086 int fregno = next_cum.fregno, nregs;
13087 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
13088 rtx lab = gen_label_rtx ();
13089 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
13090 * UNITS_PER_FP_WORD);
13092 emit_jump_insn
13093 (gen_rtx_SET (pc_rtx,
13094 gen_rtx_IF_THEN_ELSE (VOIDmode,
13095 gen_rtx_NE (VOIDmode, cr1,
13096 const0_rtx),
13097 gen_rtx_LABEL_REF (VOIDmode, lab),
13098 pc_rtx)));
13100 for (nregs = 0;
13101 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
13102 fregno++, off += UNITS_PER_FP_WORD, nregs++)
13104 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13105 ? DFmode : SFmode,
13106 plus_constant (Pmode, save_area, off));
13107 MEM_NOTRAP_P (mem) = 1;
13108 set_mem_alias_set (mem, set);
13109 set_mem_align (mem, GET_MODE_ALIGNMENT (
13110 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13111 ? DFmode : SFmode));
13112 emit_move_insn (mem, gen_rtx_REG (
13113 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13114 ? DFmode : SFmode, fregno));
13117 emit_label (lab);
13121 /* Create the va_list data type. */
13123 static tree
13124 rs6000_build_builtin_va_list (void)
13126 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
13128 /* For AIX, prefer 'char *' because that's what the system
13129 header files like. */
13130 if (DEFAULT_ABI != ABI_V4)
13131 return build_pointer_type (char_type_node);
13133 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
13134 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
13135 get_identifier ("__va_list_tag"), record);
13137 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
13138 unsigned_char_type_node);
13139 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
13140 unsigned_char_type_node);
13141 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
13142 every user file. */
13143 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13144 get_identifier ("reserved"), short_unsigned_type_node);
13145 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13146 get_identifier ("overflow_arg_area"),
13147 ptr_type_node);
13148 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13149 get_identifier ("reg_save_area"),
13150 ptr_type_node);
13152 va_list_gpr_counter_field = f_gpr;
13153 va_list_fpr_counter_field = f_fpr;
13155 DECL_FIELD_CONTEXT (f_gpr) = record;
13156 DECL_FIELD_CONTEXT (f_fpr) = record;
13157 DECL_FIELD_CONTEXT (f_res) = record;
13158 DECL_FIELD_CONTEXT (f_ovf) = record;
13159 DECL_FIELD_CONTEXT (f_sav) = record;
13161 TYPE_STUB_DECL (record) = type_decl;
13162 TYPE_NAME (record) = type_decl;
13163 TYPE_FIELDS (record) = f_gpr;
13164 DECL_CHAIN (f_gpr) = f_fpr;
13165 DECL_CHAIN (f_fpr) = f_res;
13166 DECL_CHAIN (f_res) = f_ovf;
13167 DECL_CHAIN (f_ovf) = f_sav;
13169 layout_type (record);
13171 /* The correct type is an array type of one element. */
13172 return build_array_type (record, build_index_type (size_zero_node));
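/* For reference, the record built above corresponds to this C type
   (a sketch of the V.4 layout, not a declaration the compiler uses):

       typedef struct __va_list_tag {
         unsigned char gpr;          -- index of next saved GPR
         unsigned char fpr;          -- index of next saved FPR
         unsigned short reserved;    -- the named padding, see above
         void *overflow_arg_area;    -- next argument on the stack
         void *reg_save_area;        -- base of the register save block
       } __va_list[1];               -- array of one element, as returned  */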
13175 /* Implement va_start. */
13177 static void
13178 rs6000_va_start (tree valist, rtx nextarg)
13180 HOST_WIDE_INT words, n_gpr, n_fpr;
13181 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13182 tree gpr, fpr, ovf, sav, t;
13184 /* Only SVR4 needs something special. */
13185 if (DEFAULT_ABI != ABI_V4)
13187 std_expand_builtin_va_start (valist, nextarg);
13188 return;
13191 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13192 f_fpr = DECL_CHAIN (f_gpr);
13193 f_res = DECL_CHAIN (f_fpr);
13194 f_ovf = DECL_CHAIN (f_res);
13195 f_sav = DECL_CHAIN (f_ovf);
13197 valist = build_simple_mem_ref (valist);
13198 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13199 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13200 f_fpr, NULL_TREE);
13201 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13202 f_ovf, NULL_TREE);
13203 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13204 f_sav, NULL_TREE);
13206 /* Count number of gp and fp argument registers used. */
13207 words = crtl->args.info.words;
13208 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
13209 GP_ARG_NUM_REG);
13210 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
13211 FP_ARG_NUM_REG);
13213 if (TARGET_DEBUG_ARG)
13214 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
13215 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
13216 words, n_gpr, n_fpr);
13218 if (cfun->va_list_gpr_size)
13220 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13221 build_int_cst (NULL_TREE, n_gpr));
13222 TREE_SIDE_EFFECTS (t) = 1;
13223 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13226 if (cfun->va_list_fpr_size)
13228 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13229 build_int_cst (NULL_TREE, n_fpr));
13230 TREE_SIDE_EFFECTS (t) = 1;
13231 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13233 #ifdef HAVE_AS_GNU_ATTRIBUTE
13234 if (call_ABI_of_interest (cfun->decl))
13235 rs6000_passes_float = true;
13236 #endif
13239 /* Find the overflow area. */
13240 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13241 if (words != 0)
13242 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13243 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13244 TREE_SIDE_EFFECTS (t) = 1;
13245 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13247 /* If there were no va_arg invocations, don't set up the register
13248 save area. */
13249 if (!cfun->va_list_gpr_size
13250 && !cfun->va_list_fpr_size
13251 && n_gpr < GP_ARG_NUM_REG
13252 && n_fpr < FP_ARG_V4_MAX_REG)
13253 return;
13255 /* Find the register save area. */
13256 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13257 if (cfun->machine->varargs_save_offset)
13258 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13259 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13260 TREE_SIDE_EFFECTS (t) = 1;
13261 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
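/* Illustrative state after va_start for a hypothetical
   "int f (int a, ...)" compiled for V.4 (a sketch, not compiler code):

       ap->gpr = 1;                  -- r3 was consumed by 'a'
       ap->fpr = 0;                  -- no named FP arguments
       ap->overflow_arg_area = incoming arg pointer (+ 0 words here);
       ap->reg_save_area = frame base + varargs_save_offset;  */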
13264 /* Implement va_arg. */
13266 static tree
13267 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13268 gimple_seq *post_p)
13270 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13271 tree gpr, fpr, ovf, sav, reg, t, u;
13272 int size, rsize, n_reg, sav_ofs, sav_scale;
13273 tree lab_false, lab_over, addr;
13274 int align;
13275 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13276 int regalign = 0;
13277 gimple *stmt;
13279 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13281 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13282 return build_va_arg_indirect_ref (t);
13285 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13286 earlier version of gcc, with the property that it always applied alignment
13287 adjustments to the va-args (even for zero-sized types). The cheapest way
13288 to deal with this is to replicate the effect of the part of
13289 std_gimplify_va_arg_expr that carries out the align adjust, for the
13290 relevant case.
13291 We don't need to check for pass-by-reference because of the test above.
13292 We can return a simplified answer, since we know there's no offset to add. */
13294 if (((TARGET_MACHO
13295 && rs6000_darwin64_abi)
13296 || DEFAULT_ABI == ABI_ELFv2
13297 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13298 && integer_zerop (TYPE_SIZE (type)))
13300 unsigned HOST_WIDE_INT align, boundary;
13301 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13302 align = PARM_BOUNDARY / BITS_PER_UNIT;
13303 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13304 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13305 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13306 boundary /= BITS_PER_UNIT;
13307 if (boundary > align)
13309 tree t;
13310 /* This updates arg ptr by the amount that would be necessary
13311 to align the zero-sized (but not zero-alignment) item. */
13312 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13313 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13314 gimplify_and_add (t, pre_p);
13316 t = fold_convert (sizetype, valist_tmp);
13317 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13318 fold_convert (TREE_TYPE (valist),
13319 fold_build2 (BIT_AND_EXPR, sizetype, t,
13320 size_int (-boundary))));
13321 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13322 gimplify_and_add (t, pre_p);
13324 /* Since it is zero-sized there's no increment for the item itself. */
13325 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13326 return build_va_arg_indirect_ref (valist_tmp);
13329 if (DEFAULT_ABI != ABI_V4)
13331 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13333 tree elem_type = TREE_TYPE (type);
13334 machine_mode elem_mode = TYPE_MODE (elem_type);
13335 int elem_size = GET_MODE_SIZE (elem_mode);
13337 if (elem_size < UNITS_PER_WORD)
13339 tree real_part, imag_part;
13340 gimple_seq post = NULL;
13342 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13343 &post);
13344 /* Copy the value into a temporary, lest the formal temporary
13345 be reused out from under us. */
13346 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13347 gimple_seq_add_seq (pre_p, post);
13349 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13350 post_p);
13352 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13356 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13359 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13360 f_fpr = DECL_CHAIN (f_gpr);
13361 f_res = DECL_CHAIN (f_fpr);
13362 f_ovf = DECL_CHAIN (f_res);
13363 f_sav = DECL_CHAIN (f_ovf);
13365 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13366 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13367 f_fpr, NULL_TREE);
13368 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13369 f_ovf, NULL_TREE);
13370 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13371 f_sav, NULL_TREE);
13373 size = int_size_in_bytes (type);
13374 rsize = (size + 3) / 4;
13375 int pad = 4 * rsize - size;
13376 align = 1;
13378 machine_mode mode = TYPE_MODE (type);
13379 if (abi_v4_pass_in_fpr (mode))
13381 /* FP args go in FP registers, if present. */
13382 reg = fpr;
13383 n_reg = (size + 7) / 8;
13384 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
13385 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
13386 if (mode != SFmode && mode != SDmode)
13387 align = 8;
13389 else
13391 /* Otherwise into GP registers. */
13392 reg = gpr;
13393 n_reg = rsize;
13394 sav_ofs = 0;
13395 sav_scale = 4;
13396 if (n_reg == 2)
13397 align = 8;
13400 /* Pull the value out of the saved registers.... */
13402 lab_over = NULL;
13403 addr = create_tmp_var (ptr_type_node, "addr");
13405 /* AltiVec vectors never go in registers when -mabi=altivec. */
13406 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13407 align = 16;
13408 else
13410 lab_false = create_artificial_label (input_location);
13411 lab_over = create_artificial_label (input_location);
13413 /* Long long is aligned in the registers. So is any other 2-GPR
13414 item, such as complex int, due to a historical mistake. */
13415 u = reg;
13416 if (n_reg == 2 && reg == gpr)
13418 regalign = 1;
13419 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13420 build_int_cst (TREE_TYPE (reg), n_reg - 1));
13421 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
13422 unshare_expr (reg), u);
13424 /* _Decimal128 is passed in even/odd fpr pairs; the stored
13425 reg number is 0 for f1, so we want to make it odd. */
13426 else if (reg == fpr && mode == TDmode)
13428 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13429 build_int_cst (TREE_TYPE (reg), 1));
13430 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
13433 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
13434 t = build2 (GE_EXPR, boolean_type_node, u, t);
13435 u = build1 (GOTO_EXPR, void_type_node, lab_false);
13436 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13437 gimplify_and_add (t, pre_p);
13439 t = sav;
13440 if (sav_ofs)
13441 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13443 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13444 build_int_cst (TREE_TYPE (reg), n_reg));
13445 u = fold_convert (sizetype, u);
13446 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13447 t = fold_build_pointer_plus (t, u);
13449 /* _Decimal32 varargs are located in the second word of the 64-bit
13450 FP register for 32-bit binaries. */
13451 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
13452 t = fold_build_pointer_plus_hwi (t, size);
13454 /* Args are passed right-aligned. */
13455 if (BYTES_BIG_ENDIAN)
13456 t = fold_build_pointer_plus_hwi (t, pad);
13458 gimplify_assign (addr, t, pre_p);
13460 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13462 stmt = gimple_build_label (lab_false);
13463 gimple_seq_add_stmt (pre_p, stmt);
13465 if ((n_reg == 2 && !regalign) || n_reg > 2)
13467 /* Ensure that we don't find any more args in regs.
13468 Alignment has taken care of the special cases. */
13469 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13473 /* ... otherwise out of the overflow area. */
13475 /* Care for on-stack alignment if needed. */
13476 t = ovf;
13477 if (align != 1)
13479 t = fold_build_pointer_plus_hwi (t, align - 1);
13480 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13481 build_int_cst (TREE_TYPE (t), -align));
13484 /* Args are passed right-aligned. */
13485 if (BYTES_BIG_ENDIAN)
13486 t = fold_build_pointer_plus_hwi (t, pad);
13488 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13490 gimplify_assign (unshare_expr (addr), t, pre_p);
13492 t = fold_build_pointer_plus_hwi (t, size);
13493 gimplify_assign (unshare_expr (ovf), t, pre_p);
13495 if (lab_over)
13497 stmt = gimple_build_label (lab_over);
13498 gimple_seq_add_stmt (pre_p, stmt);
13501 if (STRICT_ALIGNMENT
13502 && (TYPE_ALIGN (type)
13503 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13505 /* The value (of type complex double, for example) may not be
13506 aligned in memory in the saved registers, so copy via a
13507 temporary. (This is the same code as used for SPARC.) */
13508 tree tmp = create_tmp_var (type, "va_arg_tmp");
13509 tree dest_addr = build_fold_addr_expr (tmp);
13511 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13512 3, dest_addr, addr, size_int (rsize * 4));
13514 gimplify_and_add (copy, pre_p);
13515 addr = dest_addr;
13518 addr = fold_convert (ptrtype, addr);
13519 return build_va_arg_indirect_ref (addr);
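/* Worked example of the overflow-area rounding used above (a sketch):
   for an 8-byte-aligned type with the overflow pointer currently at
   0x...14,

       t = (0x14 + (8 - 1)) & -8 = 0x1b & ~7 = 0x18

   i.e. fold_build_pointer_plus_hwi (t, align - 1) followed by the
   BIT_AND_EXPR with -align rounds the pointer up to the next multiple
   of ALIGN.  */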
13522 /* Builtins. */
13524 static void
13525 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13527 tree t;
13528 unsigned classify = rs6000_builtin_info[(int)code].attr;
13529 const char *attr_string = "";
13531 gcc_assert (name != NULL);
13532 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13534 if (rs6000_builtin_decls[(int)code])
13535 fatal_error (input_location,
13536 "internal error: builtin function %qs already processed",
13537 name);
13539 rs6000_builtin_decls[(int)code] = t =
13540 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13542 /* Set any special attributes. */
13543 if ((classify & RS6000_BTC_CONST) != 0)
13545 /* const function, function only depends on the inputs. */
13546 TREE_READONLY (t) = 1;
13547 TREE_NOTHROW (t) = 1;
13548 attr_string = ", const";
13550 else if ((classify & RS6000_BTC_PURE) != 0)
13552 /* pure function, function can read global memory, but does not set any
13553 external state. */
13554 DECL_PURE_P (t) = 1;
13555 TREE_NOTHROW (t) = 1;
13556 attr_string = ", pure";
13558 else if ((classify & RS6000_BTC_FP) != 0)
13560 /* Function is a math function. If rounding mode is on, then treat the
13561 function as not reading global memory, but it can have arbitrary side
13562 effects. If it is off, then assume the function is a const function.
13563 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13564 builtin-attribute.def that is used for the math functions. */
13565 TREE_NOTHROW (t) = 1;
13566 if (flag_rounding_math)
13568 DECL_PURE_P (t) = 1;
13569 DECL_IS_NOVOPS (t) = 1;
13570 attr_string = ", fp, pure";
13572 else
13574 TREE_READONLY (t) = 1;
13575 attr_string = ", fp, const";
13578 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13579 gcc_unreachable ();
13581 if (TARGET_DEBUG_BUILTIN)
13582 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13583 (int)code, name, attr_string);
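/* Illustrative call (a sketch; actual registrations are driven from
   rs6000-builtin.def via the macro tables below, and the type node
   name shown is hypothetical):

       def_builtin ("__builtin_altivec_vaddubm",
                    v16qi_ftype_v16qi_v16qi,
                    ALTIVEC_BUILTIN_VADDUBM);

   This records the decl in rs6000_builtin_decls and applies the
   const/pure/fp attributes according to the builtin's RS6000_BTC_*
   bits.  */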
13586 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13588 #undef RS6000_BUILTIN_0
13589 #undef RS6000_BUILTIN_1
13590 #undef RS6000_BUILTIN_2
13591 #undef RS6000_BUILTIN_3
13592 #undef RS6000_BUILTIN_A
13593 #undef RS6000_BUILTIN_D
13594 #undef RS6000_BUILTIN_H
13595 #undef RS6000_BUILTIN_P
13596 #undef RS6000_BUILTIN_Q
13597 #undef RS6000_BUILTIN_X
13599 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13600 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13601 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13602 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13603 { MASK, ICODE, NAME, ENUM },
13605 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13606 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13607 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13608 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13609 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13610 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13612 static const struct builtin_description bdesc_3arg[] =
13614 #include "rs6000-builtin.def"
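/* A sketch of the X-macro technique used for this and the following
   tables (the entry shown is hypothetical): rs6000-builtin.def holds
   one master list of lines such as

       RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP,
                         "__builtin_altivec_vmaddfp",
                         RS6000_BTM_ALTIVEC, RS6000_BTC_FP,
                         CODE_FOR_fmav4sf4)

   Before each #include, exactly one arity macro is defined to emit a
   { MASK, ICODE, NAME, ENUM } initializer while the other nine expand
   to nothing, so the same .def file populates bdesc_3arg here,
   bdesc_dst below, and so on.  */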
13617 /* DST operations: void foo (void *, const int, const char). */
13619 #undef RS6000_BUILTIN_0
13620 #undef RS6000_BUILTIN_1
13621 #undef RS6000_BUILTIN_2
13622 #undef RS6000_BUILTIN_3
13623 #undef RS6000_BUILTIN_A
13624 #undef RS6000_BUILTIN_D
13625 #undef RS6000_BUILTIN_H
13626 #undef RS6000_BUILTIN_P
13627 #undef RS6000_BUILTIN_Q
13628 #undef RS6000_BUILTIN_X
13630 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13631 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13632 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13633 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13634 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13635 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13636 { MASK, ICODE, NAME, ENUM },
13638 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13639 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13640 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13641 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13643 static const struct builtin_description bdesc_dst[] =
13645 #include "rs6000-builtin.def"
13648 /* Simple binary operations: VECc = foo (VECa, VECb). */
13650 #undef RS6000_BUILTIN_0
13651 #undef RS6000_BUILTIN_1
13652 #undef RS6000_BUILTIN_2
13653 #undef RS6000_BUILTIN_3
13654 #undef RS6000_BUILTIN_A
13655 #undef RS6000_BUILTIN_D
13656 #undef RS6000_BUILTIN_H
13657 #undef RS6000_BUILTIN_P
13658 #undef RS6000_BUILTIN_Q
13659 #undef RS6000_BUILTIN_X
13661 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13662 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13663 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13664 { MASK, ICODE, NAME, ENUM },
13666 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13667 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13668 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13669 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13670 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13671 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13672 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13674 static const struct builtin_description bdesc_2arg[] =
13676 #include "rs6000-builtin.def"
13679 #undef RS6000_BUILTIN_0
13680 #undef RS6000_BUILTIN_1
13681 #undef RS6000_BUILTIN_2
13682 #undef RS6000_BUILTIN_3
13683 #undef RS6000_BUILTIN_A
13684 #undef RS6000_BUILTIN_D
13685 #undef RS6000_BUILTIN_H
13686 #undef RS6000_BUILTIN_P
13687 #undef RS6000_BUILTIN_Q
13688 #undef RS6000_BUILTIN_X
13690 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13691 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13692 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13693 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13694 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13695 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13696 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13697 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13698 { MASK, ICODE, NAME, ENUM },
13700 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13701 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13703 /* AltiVec predicates. */
13705 static const struct builtin_description bdesc_altivec_preds[] =
13707 #include "rs6000-builtin.def"
13710 /* PAIRED predicates. */
13711 #undef RS6000_BUILTIN_0
13712 #undef RS6000_BUILTIN_1
13713 #undef RS6000_BUILTIN_2
13714 #undef RS6000_BUILTIN_3
13715 #undef RS6000_BUILTIN_A
13716 #undef RS6000_BUILTIN_D
13717 #undef RS6000_BUILTIN_H
13718 #undef RS6000_BUILTIN_P
13719 #undef RS6000_BUILTIN_Q
13720 #undef RS6000_BUILTIN_X
13722 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13723 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13724 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13725 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13726 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13727 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13728 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13729 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13730 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13731 { MASK, ICODE, NAME, ENUM },
13733 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13735 static const struct builtin_description bdesc_paired_preds[] =
13737 #include "rs6000-builtin.def"
13740 /* ABS* operations. */
13742 #undef RS6000_BUILTIN_0
13743 #undef RS6000_BUILTIN_1
13744 #undef RS6000_BUILTIN_2
13745 #undef RS6000_BUILTIN_3
13746 #undef RS6000_BUILTIN_A
13747 #undef RS6000_BUILTIN_D
13748 #undef RS6000_BUILTIN_H
13749 #undef RS6000_BUILTIN_P
13750 #undef RS6000_BUILTIN_Q
13751 #undef RS6000_BUILTIN_X
13753 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13754 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13755 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13756 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13757 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13758 { MASK, ICODE, NAME, ENUM },
13760 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13761 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13762 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13763 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13764 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13766 static const struct builtin_description bdesc_abs[] =
13768 #include "rs6000-builtin.def"
13771 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13772 foo (VECa). */
13774 #undef RS6000_BUILTIN_0
13775 #undef RS6000_BUILTIN_1
13776 #undef RS6000_BUILTIN_2
13777 #undef RS6000_BUILTIN_3
13778 #undef RS6000_BUILTIN_A
13779 #undef RS6000_BUILTIN_D
13780 #undef RS6000_BUILTIN_H
13781 #undef RS6000_BUILTIN_P
13782 #undef RS6000_BUILTIN_Q
13783 #undef RS6000_BUILTIN_X
13785 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13786 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13787 { MASK, ICODE, NAME, ENUM },
13789 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13790 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13791 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13792 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13793 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13794 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13795 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13796 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13798 static const struct builtin_description bdesc_1arg[] =
13800 #include "rs6000-builtin.def"
13803 /* Simple no-argument operations: result = __builtin_darn_32 () */
13805 #undef RS6000_BUILTIN_0
13806 #undef RS6000_BUILTIN_1
13807 #undef RS6000_BUILTIN_2
13808 #undef RS6000_BUILTIN_3
13809 #undef RS6000_BUILTIN_A
13810 #undef RS6000_BUILTIN_D
13811 #undef RS6000_BUILTIN_H
13812 #undef RS6000_BUILTIN_P
13813 #undef RS6000_BUILTIN_Q
13814 #undef RS6000_BUILTIN_X
13816 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13817 { MASK, ICODE, NAME, ENUM },
13819 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13820 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13821 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13822 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13823 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13824 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13825 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13826 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13827 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13829 static const struct builtin_description bdesc_0arg[] =
13831 #include "rs6000-builtin.def"
13834 /* HTM builtins. */
13835 #undef RS6000_BUILTIN_0
13836 #undef RS6000_BUILTIN_1
13837 #undef RS6000_BUILTIN_2
13838 #undef RS6000_BUILTIN_3
13839 #undef RS6000_BUILTIN_A
13840 #undef RS6000_BUILTIN_D
13841 #undef RS6000_BUILTIN_H
13842 #undef RS6000_BUILTIN_P
13843 #undef RS6000_BUILTIN_Q
13844 #undef RS6000_BUILTIN_X
13846 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13847 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13848 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13849 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13850 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13851 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13852 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13853 { MASK, ICODE, NAME, ENUM },
13855 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13856 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13857 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13859 static const struct builtin_description bdesc_htm[] =
13861 #include "rs6000-builtin.def"
13864 #undef RS6000_BUILTIN_0
13865 #undef RS6000_BUILTIN_1
13866 #undef RS6000_BUILTIN_2
13867 #undef RS6000_BUILTIN_3
13868 #undef RS6000_BUILTIN_A
13869 #undef RS6000_BUILTIN_D
13870 #undef RS6000_BUILTIN_H
13871 #undef RS6000_BUILTIN_P
13872 #undef RS6000_BUILTIN_Q
13873 #undef RS6000_BUILTIN_X
13874 /* Return true if a builtin function is overloaded. */
13875 bool
13876 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13878 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13881 const char *
13882 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13884 return rs6000_builtin_info[(int)fncode].name;
13887 /* Expand an expression EXP that calls a builtin without arguments. */
13888 static rtx
13889 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13891 rtx pat;
13892 machine_mode tmode = insn_data[icode].operand[0].mode;
13894 if (icode == CODE_FOR_nothing)
13895 /* Builtin not supported on this processor. */
13896 return 0;
13898 if (target == 0
13899 || GET_MODE (target) != tmode
13900 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13901 target = gen_reg_rtx (tmode);
13903 pat = GEN_FCN (icode) (target);
13904 if (! pat)
13905 return 0;
13906 emit_insn (pat);
13908 return target;
13912 static rtx
13913 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13915 rtx pat;
13916 tree arg0 = CALL_EXPR_ARG (exp, 0);
13917 tree arg1 = CALL_EXPR_ARG (exp, 1);
13918 rtx op0 = expand_normal (arg0);
13919 rtx op1 = expand_normal (arg1);
13920 machine_mode mode0 = insn_data[icode].operand[0].mode;
13921 machine_mode mode1 = insn_data[icode].operand[1].mode;
13923 if (icode == CODE_FOR_nothing)
13924 /* Builtin not supported on this processor. */
13925 return 0;
13927 /* If we got invalid arguments bail out before generating bad rtl. */
13928 if (arg0 == error_mark_node || arg1 == error_mark_node)
13929 return const0_rtx;
13931 if (GET_CODE (op0) != CONST_INT
13932 || INTVAL (op0) > 255
13933 || INTVAL (op0) < 0)
13935 error ("argument 1 must be an 8-bit field value");
13936 return const0_rtx;
13939 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13940 op0 = copy_to_mode_reg (mode0, op0);
13942 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13943 op1 = copy_to_mode_reg (mode1, op1);
13945 pat = GEN_FCN (icode) (op0, op1);
13946 if (! pat)
13947 return const0_rtx;
13948 emit_insn (pat);
13950 return NULL_RTX;
13953 static rtx
13954 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13956 rtx pat;
13957 tree arg0 = CALL_EXPR_ARG (exp, 0);
13958 rtx op0 = expand_normal (arg0);
13959 machine_mode tmode = insn_data[icode].operand[0].mode;
13960 machine_mode mode0 = insn_data[icode].operand[1].mode;
13962 if (icode == CODE_FOR_nothing)
13963 /* Builtin not supported on this processor. */
13964 return 0;
13966 /* If we got invalid arguments bail out before generating bad rtl. */
13967 if (arg0 == error_mark_node)
13968 return const0_rtx;
13970 if (icode == CODE_FOR_altivec_vspltisb
13971 || icode == CODE_FOR_altivec_vspltish
13972 || icode == CODE_FOR_altivec_vspltisw)
13974 /* Only allow 5-bit *signed* literals. */
13975 if (GET_CODE (op0) != CONST_INT
13976 || INTVAL (op0) > 15
13977 || INTVAL (op0) < -16)
13979 error ("argument 1 must be a 5-bit signed literal");
13980 return CONST0_RTX (tmode);
13984 if (target == 0
13985 || GET_MODE (target) != tmode
13986 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13987 target = gen_reg_rtx (tmode);
13989 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13990 op0 = copy_to_mode_reg (mode0, op0);
13992 pat = GEN_FCN (icode) (target, op0);
13993 if (! pat)
13994 return 0;
13995 emit_insn (pat);
13997 return target;
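/* Illustrative use of the literal check above (hypothetical user code):

       vector signed char v = __builtin_altivec_vspltisb (5);    -- OK
       vector signed char w = __builtin_altivec_vspltisb (99);   -- rejected:
           -- "argument 1 must be a 5-bit signed literal"

   since vspltisb/vspltish/vspltisw encode the immediate in a 5-bit
   signed field (-16 .. 15).  */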
14000 static rtx
14001 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
14003 rtx pat, scratch1, scratch2;
14004 tree arg0 = CALL_EXPR_ARG (exp, 0);
14005 rtx op0 = expand_normal (arg0);
14006 machine_mode tmode = insn_data[icode].operand[0].mode;
14007 machine_mode mode0 = insn_data[icode].operand[1].mode;
14009 /* If we have invalid arguments, bail out before generating bad rtl. */
14010 if (arg0 == error_mark_node)
14011 return const0_rtx;
14013 if (target == 0
14014 || GET_MODE (target) != tmode
14015 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14016 target = gen_reg_rtx (tmode);
14018 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14019 op0 = copy_to_mode_reg (mode0, op0);
14021 scratch1 = gen_reg_rtx (mode0);
14022 scratch2 = gen_reg_rtx (mode0);
14024 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
14025 if (! pat)
14026 return 0;
14027 emit_insn (pat);
14029 return target;
14032 static rtx
14033 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
14035 rtx pat;
14036 tree arg0 = CALL_EXPR_ARG (exp, 0);
14037 tree arg1 = CALL_EXPR_ARG (exp, 1);
14038 rtx op0 = expand_normal (arg0);
14039 rtx op1 = expand_normal (arg1);
14040 machine_mode tmode = insn_data[icode].operand[0].mode;
14041 machine_mode mode0 = insn_data[icode].operand[1].mode;
14042 machine_mode mode1 = insn_data[icode].operand[2].mode;
14044 if (icode == CODE_FOR_nothing)
14045 /* Builtin not supported on this processor. */
14046 return 0;
14048 /* If we got invalid arguments bail out before generating bad rtl. */
14049 if (arg0 == error_mark_node || arg1 == error_mark_node)
14050 return const0_rtx;
14052 if (icode == CODE_FOR_altivec_vcfux
14053 || icode == CODE_FOR_altivec_vcfsx
14054 || icode == CODE_FOR_altivec_vctsxs
14055 || icode == CODE_FOR_altivec_vctuxs
14056 || icode == CODE_FOR_altivec_vspltb
14057 || icode == CODE_FOR_altivec_vsplth
14058 || icode == CODE_FOR_altivec_vspltw)
14060 /* Only allow 5-bit unsigned literals. */
14061 STRIP_NOPS (arg1);
14062 if (TREE_CODE (arg1) != INTEGER_CST
14063 || TREE_INT_CST_LOW (arg1) & ~0x1f)
14065 error ("argument 2 must be a 5-bit unsigned literal");
14066 return CONST0_RTX (tmode);
14069 else if (icode == CODE_FOR_dfptstsfi_eq_dd
14070 || icode == CODE_FOR_dfptstsfi_lt_dd
14071 || icode == CODE_FOR_dfptstsfi_gt_dd
14072 || icode == CODE_FOR_dfptstsfi_unordered_dd
14073 || icode == CODE_FOR_dfptstsfi_eq_td
14074 || icode == CODE_FOR_dfptstsfi_lt_td
14075 || icode == CODE_FOR_dfptstsfi_gt_td
14076 || icode == CODE_FOR_dfptstsfi_unordered_td)
14078 /* Only allow 6-bit unsigned literals. */
14079 STRIP_NOPS (arg0);
14080 if (TREE_CODE (arg0) != INTEGER_CST
14081 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
14083 error ("argument 1 must be a 6-bit unsigned literal");
14084 return CONST0_RTX (tmode);
14087 else if (icode == CODE_FOR_xststdcqp_kf
14088 || icode == CODE_FOR_xststdcqp_tf
14089 || icode == CODE_FOR_xststdcdp
14090 || icode == CODE_FOR_xststdcsp
14091 || icode == CODE_FOR_xvtstdcdp
14092 || icode == CODE_FOR_xvtstdcsp)
14094 /* Only allow 7-bit unsigned literals. */
14095 STRIP_NOPS (arg1);
14096 if (TREE_CODE (arg1) != INTEGER_CST
14097 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
14099 error ("argument 2 must be a 7-bit unsigned literal");
14100 return CONST0_RTX (tmode);
14103 else if (icode == CODE_FOR_unpackv1ti
14104 || icode == CODE_FOR_unpackkf
14105 || icode == CODE_FOR_unpacktf
14106 || icode == CODE_FOR_unpackif
14107 || icode == CODE_FOR_unpacktd)
14109 /* Only allow 1-bit unsigned literals. */
14110 STRIP_NOPS (arg1);
14111 if (TREE_CODE (arg1) != INTEGER_CST
14112 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
14114 error ("argument 2 must be a 1-bit unsigned literal");
14115 return CONST0_RTX (tmode);
14119 if (target == 0
14120 || GET_MODE (target) != tmode
14121 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14122 target = gen_reg_rtx (tmode);
14124 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14125 op0 = copy_to_mode_reg (mode0, op0);
14126 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14127 op1 = copy_to_mode_reg (mode1, op1);
14129 pat = GEN_FCN (icode) (target, op0, op1);
14130 if (! pat)
14131 return 0;
14132 emit_insn (pat);
14134 return target;
14137 static rtx
14138 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
14140 rtx pat, scratch;
14141 tree cr6_form = CALL_EXPR_ARG (exp, 0);
14142 tree arg0 = CALL_EXPR_ARG (exp, 1);
14143 tree arg1 = CALL_EXPR_ARG (exp, 2);
14144 rtx op0 = expand_normal (arg0);
14145 rtx op1 = expand_normal (arg1);
14146 machine_mode tmode = SImode;
14147 machine_mode mode0 = insn_data[icode].operand[1].mode;
14148 machine_mode mode1 = insn_data[icode].operand[2].mode;
14149 int cr6_form_int;
14151 if (TREE_CODE (cr6_form) != INTEGER_CST)
14153 error ("argument 1 of %qs must be a constant",
14154 "__builtin_altivec_predicate");
14155 return const0_rtx;
14157 else
14158 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
14160 gcc_assert (mode0 == mode1);
14162 /* If we have invalid arguments, bail out before generating bad rtl. */
14163 if (arg0 == error_mark_node || arg1 == error_mark_node)
14164 return const0_rtx;
14166 if (target == 0
14167 || GET_MODE (target) != tmode
14168 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14169 target = gen_reg_rtx (tmode);
14171 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14172 op0 = copy_to_mode_reg (mode0, op0);
14173 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14174 op1 = copy_to_mode_reg (mode1, op1);
14176 /* Note that for many of the relevant operations (e.g. cmpne or
14177 cmpeq) with float or double operands, it would make more sense for
14178 the mode of the allocated scratch register to be a vector-of-integer
14179 mode. But the choice to copy the mode of operand 0 was made long
14180 ago and there are no plans to change it. */
14181 scratch = gen_reg_rtx (mode0);
14183 pat = GEN_FCN (icode) (scratch, op0, op1);
14184 if (! pat)
14185 return 0;
14186 emit_insn (pat);
14188 /* The vec_any* and vec_all* predicates use the same opcodes for two
14189 different operations, but the bits in CR6 will be different
14190 depending on what information we want. So we have to play tricks
14191 with CR6 to get the right bits out.
14193 If you think this is disgusting, look at the specs for the
14194 AltiVec predicates. */
14196 switch (cr6_form_int)
14198 case 0:
14199 emit_insn (gen_cr6_test_for_zero (target));
14200 break;
14201 case 1:
14202 emit_insn (gen_cr6_test_for_zero_reverse (target));
14203 break;
14204 case 2:
14205 emit_insn (gen_cr6_test_for_lt (target));
14206 break;
14207 case 3:
14208 emit_insn (gen_cr6_test_for_lt_reverse (target));
14209 break;
14210 default:
14211 error ("argument 1 of %qs is out of range",
14212 "__builtin_altivec_predicate");
14213 break;
14216 return target;
14219 static rtx
14220 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
14222 rtx pat, addr;
14223 tree arg0 = CALL_EXPR_ARG (exp, 0);
14224 tree arg1 = CALL_EXPR_ARG (exp, 1);
14225 machine_mode tmode = insn_data[icode].operand[0].mode;
14226 machine_mode mode0 = Pmode;
14227 machine_mode mode1 = Pmode;
14228 rtx op0 = expand_normal (arg0);
14229 rtx op1 = expand_normal (arg1);
14231 if (icode == CODE_FOR_nothing)
14232 /* Builtin not supported on this processor. */
14233 return 0;
14235 /* If we got invalid arguments bail out before generating bad rtl. */
14236 if (arg0 == error_mark_node || arg1 == error_mark_node)
14237 return const0_rtx;
14239 if (target == 0
14240 || GET_MODE (target) != tmode
14241 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14242 target = gen_reg_rtx (tmode);
14244 op1 = copy_to_mode_reg (mode1, op1);
14246 if (op0 == const0_rtx)
14248 addr = gen_rtx_MEM (tmode, op1);
14250 else
14252 op0 = copy_to_mode_reg (mode0, op0);
14253 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
14256 pat = GEN_FCN (icode) (target, addr);
14258 if (! pat)
14259 return 0;
14260 emit_insn (pat);
14262 return target;
14265 /* Return a constant vector for use as a little-endian permute control vector
14266 to reverse the order of elements of the given vector mode. */
14267 static rtx
14268 swap_selector_for_mode (machine_mode mode)
14270 /* These are little endian vectors, so their elements are reversed
14271 from what you would normally expect for a permute control vector. */
14272 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14273 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14274 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14275 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
14276 unsigned int *swaparray, i;
14277 rtx perm[16];
14279 switch (mode)
14281 case E_V2DFmode:
14282 case E_V2DImode:
14283 swaparray = swap2;
14284 break;
14285 case E_V4SFmode:
14286 case E_V4SImode:
14287 swaparray = swap4;
14288 break;
14289 case E_V8HImode:
14290 swaparray = swap8;
14291 break;
14292 case E_V16QImode:
14293 swaparray = swap16;
14294 break;
14295 default:
14296 gcc_unreachable ();
14299 for (i = 0; i < 16; ++i)
14300 perm[i] = GEN_INT (swaparray[i]);
14302 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
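/* Worked example (a sketch): for V4SImode the control vector comes
   from swap4 = {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}; used as a
   vperm selector it reverses the order of the four 4-byte elements
   (element 0 swaps with 3, element 1 with 2) without changing the
   bytes inside an element.  The values look byte-reversed compared
   with a big-endian permute control vector because, as noted above,
   they are stored little-endian.  */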
14305 rtx
14306 swap_endian_selector_for_mode (machine_mode mode)
14308 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
14309 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14310 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14311 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14313 unsigned int *swaparray, i;
14314 rtx perm[16];
14316 switch (mode)
14318 case E_V1TImode:
14319 swaparray = swap1;
14320 break;
14321 case E_V2DFmode:
14322 case E_V2DImode:
14323 swaparray = swap2;
14324 break;
14325 case E_V4SFmode:
14326 case E_V4SImode:
14327 swaparray = swap4;
14328 break;
14329 case E_V8HImode:
14330 swaparray = swap8;
14331 break;
14332 default:
14333 gcc_unreachable ();
14336 for (i = 0; i < 16; ++i)
14337 perm[i] = GEN_INT (swaparray[i]);
14339 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
14340 gen_rtvec_v (16, perm)));
14343 /* Generate code for an "lvxl" or "lve*x" built-in for a little endian target
14344 with -maltivec=be specified. Issue the load followed by an element-
14345 reversing permute. */
14346 void
14347 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14349 rtx tmp = gen_reg_rtx (mode);
14350 rtx load = gen_rtx_SET (tmp, op1);
14351 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14352 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
14353 rtx sel = swap_selector_for_mode (mode);
14354 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
14356 gcc_assert (REG_P (op0));
14357 emit_insn (par);
14358 emit_insn (gen_rtx_SET (op0, vperm));
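/* Roughly the sequence emitted above for, say, V4SImode (a sketch):

       lvxl  vTMP, rA, rB          -- the load, kept in a PARALLEL with
                                   -- the identifying UNSPEC
       vperm vDST, vTMP, vTMP, vSEL

   where vSEL is the element-reversing selector returned by
   swap_selector_for_mode.  */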
14361 /* Generate code for a "stvxl" built-in for a little endian target with
14362 -maltivec=be specified. Issue the store preceded by an element-reversing
14363 permute. */
14364 void
14365 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14367 rtx tmp = gen_reg_rtx (mode);
14368 rtx store = gen_rtx_SET (op0, tmp);
14369 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14370 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
14371 rtx sel = swap_selector_for_mode (mode);
14372 rtx vperm;
14374 gcc_assert (REG_P (op1));
14375 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14376 emit_insn (gen_rtx_SET (tmp, vperm));
14377 emit_insn (par);
14380 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
14381 specified. Issue the store preceded by an element-reversing permute. */
14382 void
14383 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14385 machine_mode inner_mode = GET_MODE_INNER (mode);
14386 rtx tmp = gen_reg_rtx (mode);
14387 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
14388 rtx sel = swap_selector_for_mode (mode);
14389 rtx vperm;
14391 gcc_assert (REG_P (op1));
14392 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14393 emit_insn (gen_rtx_SET (tmp, vperm));
14394 emit_insn (gen_rtx_SET (op0, stvx));
14397 static rtx
14398 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14400 rtx pat, addr;
14401 tree arg0 = CALL_EXPR_ARG (exp, 0);
14402 tree arg1 = CALL_EXPR_ARG (exp, 1);
14403 machine_mode tmode = insn_data[icode].operand[0].mode;
14404 machine_mode mode0 = Pmode;
14405 machine_mode mode1 = Pmode;
14406 rtx op0 = expand_normal (arg0);
14407 rtx op1 = expand_normal (arg1);
14409 if (icode == CODE_FOR_nothing)
14410 /* Builtin not supported on this processor. */
14411 return 0;
14413 /* If we got invalid arguments bail out before generating bad rtl. */
14414 if (arg0 == error_mark_node || arg1 == error_mark_node)
14415 return const0_rtx;
14417 if (target == 0
14418 || GET_MODE (target) != tmode
14419 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14420 target = gen_reg_rtx (tmode);
14422 op1 = copy_to_mode_reg (mode1, op1);
14424 /* For LVX, express the RTL accurately by ANDing the address with -16.
14425 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14426 so the raw address is fine. */
14427 if (icode == CODE_FOR_altivec_lvx_v2df_2op
14428 || icode == CODE_FOR_altivec_lvx_v2di_2op
14429 || icode == CODE_FOR_altivec_lvx_v4sf_2op
14430 || icode == CODE_FOR_altivec_lvx_v4si_2op
14431 || icode == CODE_FOR_altivec_lvx_v8hi_2op
14432 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
14434 rtx rawaddr;
14435 if (op0 == const0_rtx)
14436 rawaddr = op1;
14437 else
14439 op0 = copy_to_mode_reg (mode0, op0);
14440 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
14442 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14443 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
14445 /* For -maltivec=be, emit the load and follow it up with a
14446 permute to swap the elements. */
14447 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14449 rtx temp = gen_reg_rtx (tmode);
14450 emit_insn (gen_rtx_SET (temp, addr));
14452 rtx sel = swap_selector_for_mode (tmode);
14453 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
14454 UNSPEC_VPERM);
14455 emit_insn (gen_rtx_SET (target, vperm));
14457 else
14458 emit_insn (gen_rtx_SET (target, addr));
14460 else
14462 if (op0 == const0_rtx)
14463 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14464 else
14466 op0 = copy_to_mode_reg (mode0, op0);
14467 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14468 gen_rtx_PLUS (Pmode, op1, op0));
14471 pat = GEN_FCN (icode) (target, addr);
14472 if (! pat)
14473 return 0;
14474 emit_insn (pat);
14477 return target;
14480 static rtx
14481 paired_expand_stv_builtin (enum insn_code icode, tree exp)
14483 tree arg0 = CALL_EXPR_ARG (exp, 0);
14484 tree arg1 = CALL_EXPR_ARG (exp, 1);
14485 tree arg2 = CALL_EXPR_ARG (exp, 2);
14486 rtx op0 = expand_normal (arg0);
14487 rtx op1 = expand_normal (arg1);
14488 rtx op2 = expand_normal (arg2);
14489 rtx pat, addr;
14490 machine_mode tmode = insn_data[icode].operand[0].mode;
14491 machine_mode mode1 = Pmode;
14492 machine_mode mode2 = Pmode;
14494 /* Invalid arguments. Bail before doing anything stoopid! */
14495 if (arg0 == error_mark_node
14496 || arg1 == error_mark_node
14497 || arg2 == error_mark_node)
14498 return const0_rtx;
14500 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
14501 op0 = copy_to_mode_reg (tmode, op0);
14503 op2 = copy_to_mode_reg (mode2, op2);
14505 if (op1 == const0_rtx)
14507 addr = gen_rtx_MEM (tmode, op2);
14509 else
14511 op1 = copy_to_mode_reg (mode1, op1);
14512 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
14515 pat = GEN_FCN (icode) (addr, op0);
14516 if (pat)
14517 emit_insn (pat);
14518 return NULL_RTX;
14521 static rtx
14522 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
14524 rtx pat;
14525 tree arg0 = CALL_EXPR_ARG (exp, 0);
14526 tree arg1 = CALL_EXPR_ARG (exp, 1);
14527 tree arg2 = CALL_EXPR_ARG (exp, 2);
14528 rtx op0 = expand_normal (arg0);
14529 rtx op1 = expand_normal (arg1);
14530 rtx op2 = expand_normal (arg2);
14531 machine_mode mode0 = insn_data[icode].operand[0].mode;
14532 machine_mode mode1 = insn_data[icode].operand[1].mode;
14533 machine_mode mode2 = insn_data[icode].operand[2].mode;
14535 if (icode == CODE_FOR_nothing)
14536 /* Builtin not supported on this processor. */
14537 return NULL_RTX;
14539 /* If we got invalid arguments, bail out before generating bad rtl. */
14540 if (arg0 == error_mark_node
14541 || arg1 == error_mark_node
14542 || arg2 == error_mark_node)
14543 return NULL_RTX;
14545 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14546 op0 = copy_to_mode_reg (mode0, op0);
14547 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14548 op1 = copy_to_mode_reg (mode1, op1);
14549 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14550 op2 = copy_to_mode_reg (mode2, op2);
14552 pat = GEN_FCN (icode) (op0, op1, op2);
14553 if (pat)
14554 emit_insn (pat);
14556 return NULL_RTX;
14559 static rtx
14560 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14562 tree arg0 = CALL_EXPR_ARG (exp, 0);
14563 tree arg1 = CALL_EXPR_ARG (exp, 1);
14564 tree arg2 = CALL_EXPR_ARG (exp, 2);
14565 rtx op0 = expand_normal (arg0);
14566 rtx op1 = expand_normal (arg1);
14567 rtx op2 = expand_normal (arg2);
14568 rtx pat, addr, rawaddr;
14569 machine_mode tmode = insn_data[icode].operand[0].mode;
14570 machine_mode smode = insn_data[icode].operand[1].mode;
14571 machine_mode mode1 = Pmode;
14572 machine_mode mode2 = Pmode;
14574 /* Invalid arguments; bail out before generating bad rtl. */
14575 if (arg0 == error_mark_node
14576 || arg1 == error_mark_node
14577 || arg2 == error_mark_node)
14578 return const0_rtx;
14580 op2 = copy_to_mode_reg (mode2, op2);
14582 /* For STVX, express the RTL accurately by ANDing the address with -16.
14583 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14584 so the raw address is fine. */
14585 if (icode == CODE_FOR_altivec_stvx_v2df_2op
14586 || icode == CODE_FOR_altivec_stvx_v2di_2op
14587 || icode == CODE_FOR_altivec_stvx_v4sf_2op
14588 || icode == CODE_FOR_altivec_stvx_v4si_2op
14589 || icode == CODE_FOR_altivec_stvx_v8hi_2op
14590 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
14592 if (op1 == const0_rtx)
14593 rawaddr = op2;
14594 else
14596 op1 = copy_to_mode_reg (mode1, op1);
14597 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14600 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14601 addr = gen_rtx_MEM (tmode, addr);
14603 op0 = copy_to_mode_reg (tmode, op0);
14605 /* For -maltivec=be, emit a permute to swap the elements, followed
14606 by the store. */
14607 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14609 rtx temp = gen_reg_rtx (tmode);
14610 rtx sel = swap_selector_for_mode (tmode);
14611 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
14612 UNSPEC_VPERM);
14613 emit_insn (gen_rtx_SET (temp, vperm));
14614 emit_insn (gen_rtx_SET (addr, temp));
14616 else
14617 emit_insn (gen_rtx_SET (addr, op0));
14619 else
14621 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14622 op0 = copy_to_mode_reg (smode, op0);
14624 if (op1 == const0_rtx)
14625 addr = gen_rtx_MEM (tmode, op2);
14626 else
14628 op1 = copy_to_mode_reg (mode1, op1);
14629 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14632 pat = GEN_FCN (icode) (addr, op0);
14633 if (pat)
14634 emit_insn (pat);
14637 return NULL_RTX;
14640 /* Return the appropriate SPR number associated with the given builtin. */
14641 static inline HOST_WIDE_INT
14642 htm_spr_num (enum rs6000_builtins code)
14644 if (code == HTM_BUILTIN_GET_TFHAR
14645 || code == HTM_BUILTIN_SET_TFHAR)
14646 return TFHAR_SPR;
14647 else if (code == HTM_BUILTIN_GET_TFIAR
14648 || code == HTM_BUILTIN_SET_TFIAR)
14649 return TFIAR_SPR;
14650 else if (code == HTM_BUILTIN_GET_TEXASR
14651 || code == HTM_BUILTIN_SET_TEXASR)
14652 return TEXASR_SPR;
14653 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14654 || code == HTM_BUILTIN_SET_TEXASRU);
14655 return TEXASRU_SPR;
14658 /* Return the appropriate SPR regno associated with the given builtin. */
14659 static inline HOST_WIDE_INT
14660 htm_spr_regno (enum rs6000_builtins code)
14662 if (code == HTM_BUILTIN_GET_TFHAR
14663 || code == HTM_BUILTIN_SET_TFHAR)
14664 return TFHAR_REGNO;
14665 else if (code == HTM_BUILTIN_GET_TFIAR
14666 || code == HTM_BUILTIN_SET_TFIAR)
14667 return TFIAR_REGNO;
14668 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14669 || code == HTM_BUILTIN_SET_TEXASR
14670 || code == HTM_BUILTIN_GET_TEXASRU
14671 || code == HTM_BUILTIN_SET_TEXASRU);
14672 return TEXASR_REGNO;
14675 /* Return the correct ICODE value depending on whether we are
14676 setting or reading the HTM SPRs. */
14677 static inline enum insn_code
14678 rs6000_htm_spr_icode (bool nonvoid)
14680 if (nonvoid)
14681 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14682 else
14683 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
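/* Illustrative sketch (editor's addition, not from the original source):
   the SPR helpers above feed htm_expand_builtin; for example, reading a
   transaction diagnostic register.  Assuming -mhtm.  Guarded out of the
   build.  */
#if 0
unsigned long
texasr_example (void)
{
  /* __builtin_get_texasr maps to HTM_BUILTIN_GET_TEXASR; the expander
     below appends the htm_spr_num and htm_spr_regno values as the
     final operands.  */
  return __builtin_get_texasr ();
}
#endif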
14686 /* Expand the HTM builtin in EXP and store the result in TARGET.
14687 Store true in *EXPANDEDP if we found a builtin to expand. */
14688 static rtx
14689 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14691 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14692 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14693 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14694 const struct builtin_description *d;
14695 size_t i;
14697 *expandedp = true;
14699 if (!TARGET_POWERPC64
14700 && (fcode == HTM_BUILTIN_TABORTDC
14701 || fcode == HTM_BUILTIN_TABORTDCI))
14703 size_t uns_fcode = (size_t)fcode;
14704 const char *name = rs6000_builtin_info[uns_fcode].name;
14705 error ("builtin %qs is only valid in 64-bit mode", name);
14706 return const0_rtx;
14709 /* Expand the HTM builtins. */
14710 d = bdesc_htm;
14711 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14712 if (d->code == fcode)
14714 rtx op[MAX_HTM_OPERANDS], pat;
14715 int nopnds = 0;
14716 tree arg;
14717 call_expr_arg_iterator iter;
14718 unsigned attr = rs6000_builtin_info[fcode].attr;
14719 enum insn_code icode = d->icode;
14720 const struct insn_operand_data *insn_op;
14721 bool uses_spr = (attr & RS6000_BTC_SPR);
14722 rtx cr = NULL_RTX;
14724 if (uses_spr)
14725 icode = rs6000_htm_spr_icode (nonvoid);
14726 insn_op = &insn_data[icode].operand[0];
14728 if (nonvoid)
14730 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14731 if (!target
14732 || GET_MODE (target) != tmode
14733 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14734 target = gen_reg_rtx (tmode);
14735 if (uses_spr)
14736 op[nopnds++] = target;
14739 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14741 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14742 return const0_rtx;
14744 insn_op = &insn_data[icode].operand[nopnds];
14746 op[nopnds] = expand_normal (arg);
14748 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14750 if (!strcmp (insn_op->constraint, "n"))
14752 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14753 if (!CONST_INT_P (op[nopnds]))
14754 error ("argument %d must be an unsigned literal", arg_num);
14755 else
14756 error ("argument %d is an unsigned literal that is "
14757 "out of range", arg_num);
14758 return const0_rtx;
14760 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14763 nopnds++;
14766 /* Handle the builtins for extended mnemonics. These accept
14767 no arguments, but map to builtins that take arguments. */
14768 switch (fcode)
14770 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14771 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14772 op[nopnds++] = GEN_INT (1);
14773 if (flag_checking)
14774 attr |= RS6000_BTC_UNARY;
14775 break;
14776 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14777 op[nopnds++] = GEN_INT (0);
14778 if (flag_checking)
14779 attr |= RS6000_BTC_UNARY;
14780 break;
14781 default:
14782 break;
14785 /* If this builtin accesses SPRs, then pass in the appropriate
14786 SPR number and SPR regno as the last two operands. */
14787 if (uses_spr)
14789 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14790 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14791 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14793 /* If this builtin accesses a CR, then pass in a scratch
14794 CR as the last operand. */
14795 else if (attr & RS6000_BTC_CR)
14796 { cr = gen_reg_rtx (CCmode);
14797 op[nopnds++] = cr;
14800 if (flag_checking)
14802 int expected_nopnds = 0;
14803 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14804 expected_nopnds = 1;
14805 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14806 expected_nopnds = 2;
14807 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14808 expected_nopnds = 3;
14809 if (!(attr & RS6000_BTC_VOID))
14810 expected_nopnds += 1;
14811 if (uses_spr)
14812 expected_nopnds += 2;
14814 gcc_assert (nopnds == expected_nopnds
14815 && nopnds <= MAX_HTM_OPERANDS);
14818 switch (nopnds)
14820 case 1:
14821 pat = GEN_FCN (icode) (op[0]);
14822 break;
14823 case 2:
14824 pat = GEN_FCN (icode) (op[0], op[1]);
14825 break;
14826 case 3:
14827 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14828 break;
14829 case 4:
14830 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14831 break;
14832 default:
14833 gcc_unreachable ();
14835 if (!pat)
14836 return NULL_RTX;
14837 emit_insn (pat);
14839 if (attr & RS6000_BTC_CR)
14841 if (fcode == HTM_BUILTIN_TBEGIN)
14843 /* Emit code to set TARGET to true or false depending on
14844 whether the tbegin. instruction succeeded or failed
14845 to start a transaction. We do this by placing the 1's
14846 complement of CR's EQ bit into TARGET. */
14847 rtx scratch = gen_reg_rtx (SImode);
14848 emit_insn (gen_rtx_SET (scratch,
14849 gen_rtx_EQ (SImode, cr,
14850 const0_rtx)));
14851 emit_insn (gen_rtx_SET (target,
14852 gen_rtx_XOR (SImode, scratch,
14853 GEN_INT (1))));
14855 else
14857 /* Emit code to copy the 4-bit condition register field
14858 CR into the least significant end of register TARGET. */
14859 rtx scratch1 = gen_reg_rtx (SImode);
14860 rtx scratch2 = gen_reg_rtx (SImode);
14861 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14862 emit_insn (gen_movcc (subreg, cr));
14863 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14864 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14868 if (nonvoid)
14869 return target;
14870 return const0_rtx;
14873 *expandedp = false;
14874 return NULL_RTX;
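/* Illustrative sketch (editor's addition, not from the original source):
   the CR handling above lets __builtin_tbegin be used directly as a
   truth value -- TARGET receives the complement of CR's EQ bit, so a
   successfully started transaction yields nonzero.  Assuming -mhtm;
   names are hypothetical.  Guarded out of the build.  */
#if 0
long counter;

void
tbegin_example (void)
{
  if (__builtin_tbegin (0))
    {
      /* Transactional path.  */
      counter++;
      __builtin_tend (0);
    }
  else
    /* Fallback path when the transaction failed to start.  */
    __atomic_fetch_add (&counter, 1, __ATOMIC_SEQ_CST);
}
#endif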
14877 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14879 static rtx
14880 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14881 rtx target)
14883 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14884 if (fcode == RS6000_BUILTIN_CPU_INIT)
14885 return const0_rtx;
14887 if (target == 0 || GET_MODE (target) != SImode)
14888 target = gen_reg_rtx (SImode);
14890 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14891 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14892 /* The target_clones attribute creates an ARRAY_REF instead of a
14893 STRING_CST; convert it back to a STRING_CST. */
14894 if (TREE_CODE (arg) == ARRAY_REF
14895 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14896 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14897 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14898 arg = TREE_OPERAND (arg, 0);
14900 if (TREE_CODE (arg) != STRING_CST)
14902 error ("builtin %qs only accepts a string argument",
14903 rs6000_builtin_info[(size_t) fcode].name);
14904 return const0_rtx;
14907 if (fcode == RS6000_BUILTIN_CPU_IS)
14909 const char *cpu = TREE_STRING_POINTER (arg);
14910 rtx cpuid = NULL_RTX;
14911 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14912 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14914 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14915 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14916 break;
14918 if (cpuid == NULL_RTX)
14920 /* Invalid CPU argument. */
14921 error ("cpu %qs is an invalid argument to builtin %qs",
14922 cpu, rs6000_builtin_info[(size_t) fcode].name);
14923 return const0_rtx;
14926 rtx platform = gen_reg_rtx (SImode);
14927 rtx tcbmem = gen_const_mem (SImode,
14928 gen_rtx_PLUS (Pmode,
14929 gen_rtx_REG (Pmode, TLS_REGNUM),
14930 GEN_INT (TCB_PLATFORM_OFFSET)));
14931 emit_move_insn (platform, tcbmem);
14932 emit_insn (gen_eqsi3 (target, platform, cpuid));
14934 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14936 const char *hwcap = TREE_STRING_POINTER (arg);
14937 rtx mask = NULL_RTX;
14938 int hwcap_offset;
14939 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14940 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14942 mask = GEN_INT (cpu_supports_info[i].mask);
14943 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14944 break;
14946 if (mask == NULL_RTX)
14948 /* Invalid HWCAP argument. */
14949 error ("%s %qs is an invalid argument to builtin %qs",
14950 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14951 return const0_rtx;
14954 rtx tcb_hwcap = gen_reg_rtx (SImode);
14955 rtx tcbmem = gen_const_mem (SImode,
14956 gen_rtx_PLUS (Pmode,
14957 gen_rtx_REG (Pmode, TLS_REGNUM),
14958 GEN_INT (hwcap_offset)));
14959 emit_move_insn (tcb_hwcap, tcbmem);
14960 rtx scratch1 = gen_reg_rtx (SImode);
14961 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14962 rtx scratch2 = gen_reg_rtx (SImode);
14963 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14964 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14966 else
14967 gcc_unreachable ();
14969 /* Record that we have expanded a CPU builtin, so that we can later
14970 emit a reference to the special symbol exported by LIBC to ensure we
14971 do not link against an old LIBC that doesn't support this feature. */
14972 cpu_builtin_p = true;
14974 #else
14975 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14976 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14978 /* For old LIBCs, always return FALSE. */
14979 emit_move_insn (target, GEN_INT (0));
14980 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14982 return target;
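/* Illustrative sketch (editor's addition, not from the original source):
   a minimal use of the builtins expanded above.  The strings must match
   entries in cpu_is_info / cpu_supports_info; "power9" and "vsx" are
   used here as examples.  Guarded out of the build.  */
#if 0
int
cpu_example (void)
{
  __builtin_cpu_init ();	/* Expands to nothing; see above.  */

  if (__builtin_cpu_is ("power9"))
    return 9;			/* Compares the CPUID word in the TCB.  */
  if (__builtin_cpu_supports ("vsx"))
    return 1;			/* Tests one HWCAP bit in the TCB.  */
  return 0;
}
#endif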
14985 static rtx
14986 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14988 rtx pat;
14989 tree arg0 = CALL_EXPR_ARG (exp, 0);
14990 tree arg1 = CALL_EXPR_ARG (exp, 1);
14991 tree arg2 = CALL_EXPR_ARG (exp, 2);
14992 rtx op0 = expand_normal (arg0);
14993 rtx op1 = expand_normal (arg1);
14994 rtx op2 = expand_normal (arg2);
14995 machine_mode tmode = insn_data[icode].operand[0].mode;
14996 machine_mode mode0 = insn_data[icode].operand[1].mode;
14997 machine_mode mode1 = insn_data[icode].operand[2].mode;
14998 machine_mode mode2 = insn_data[icode].operand[3].mode;
15000 if (icode == CODE_FOR_nothing)
15001 /* Builtin not supported on this processor. */
15002 return 0;
15004 /* If we got invalid arguments, bail out before generating bad rtl. */
15005 if (arg0 == error_mark_node
15006 || arg1 == error_mark_node
15007 || arg2 == error_mark_node)
15008 return const0_rtx;
15010 /* Check and prepare the argument depending on the instruction code.
15012 Note that a switch statement instead of this sequence of tests
15013 would be incorrect: many of the CODE_FOR values could be
15014 CODE_FOR_nothing, which would yield multiple case labels with
15015 identical values. (We would never reach here at runtime in
15016 that case anyway.) */
15017 if (icode == CODE_FOR_altivec_vsldoi_v4sf
15018 || icode == CODE_FOR_altivec_vsldoi_v2df
15019 || icode == CODE_FOR_altivec_vsldoi_v4si
15020 || icode == CODE_FOR_altivec_vsldoi_v8hi
15021 || icode == CODE_FOR_altivec_vsldoi_v16qi)
15023 /* Only allow 4-bit unsigned literals. */
15024 STRIP_NOPS (arg2);
15025 if (TREE_CODE (arg2) != INTEGER_CST
15026 || TREE_INT_CST_LOW (arg2) & ~0xf)
15028 error ("argument 3 must be a 4-bit unsigned literal");
15029 return CONST0_RTX (tmode);
15032 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
15033 || icode == CODE_FOR_vsx_xxpermdi_v2di
15034 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
15035 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
15036 || icode == CODE_FOR_vsx_xxpermdi_v1ti
15037 || icode == CODE_FOR_vsx_xxpermdi_v4sf
15038 || icode == CODE_FOR_vsx_xxpermdi_v4si
15039 || icode == CODE_FOR_vsx_xxpermdi_v8hi
15040 || icode == CODE_FOR_vsx_xxpermdi_v16qi
15041 || icode == CODE_FOR_vsx_xxsldwi_v16qi
15042 || icode == CODE_FOR_vsx_xxsldwi_v8hi
15043 || icode == CODE_FOR_vsx_xxsldwi_v4si
15044 || icode == CODE_FOR_vsx_xxsldwi_v4sf
15045 || icode == CODE_FOR_vsx_xxsldwi_v2di
15046 || icode == CODE_FOR_vsx_xxsldwi_v2df)
15048 /* Only allow 2-bit unsigned literals. */
15049 STRIP_NOPS (arg2);
15050 if (TREE_CODE (arg2) != INTEGER_CST
15051 || TREE_INT_CST_LOW (arg2) & ~0x3)
15053 error ("argument 3 must be a 2-bit unsigned literal");
15054 return CONST0_RTX (tmode);
15057 else if (icode == CODE_FOR_vsx_set_v2df
15058 || icode == CODE_FOR_vsx_set_v2di
15059 || icode == CODE_FOR_bcdadd
15060 || icode == CODE_FOR_bcdadd_lt
15061 || icode == CODE_FOR_bcdadd_eq
15062 || icode == CODE_FOR_bcdadd_gt
15063 || icode == CODE_FOR_bcdsub
15064 || icode == CODE_FOR_bcdsub_lt
15065 || icode == CODE_FOR_bcdsub_eq
15066 || icode == CODE_FOR_bcdsub_gt)
15068 /* Only allow 1-bit unsigned literals. */
15069 STRIP_NOPS (arg2);
15070 if (TREE_CODE (arg2) != INTEGER_CST
15071 || TREE_INT_CST_LOW (arg2) & ~0x1)
15073 error ("argument 3 must be a 1-bit unsigned literal");
15074 return CONST0_RTX (tmode);
15077 else if (icode == CODE_FOR_dfp_ddedpd_dd
15078 || icode == CODE_FOR_dfp_ddedpd_td)
15080 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15081 STRIP_NOPS (arg0);
15082 if (TREE_CODE (arg0) != INTEGER_CST
15083 || TREE_INT_CST_LOW (arg0) & ~0x3)
15085 error ("argument 1 must be 0 or 2");
15086 return CONST0_RTX (tmode);
15089 else if (icode == CODE_FOR_dfp_denbcd_dd
15090 || icode == CODE_FOR_dfp_denbcd_td)
15092 /* Only allow 1-bit unsigned literals. */
15093 STRIP_NOPS (arg0);
15094 if (TREE_CODE (arg0) != INTEGER_CST
15095 || TREE_INT_CST_LOW (arg0) & ~0x1)
15097 error ("argument 1 must be a 1-bit unsigned literal");
15098 return CONST0_RTX (tmode);
15101 else if (icode == CODE_FOR_dfp_dscli_dd
15102 || icode == CODE_FOR_dfp_dscli_td
15103 || icode == CODE_FOR_dfp_dscri_dd
15104 || icode == CODE_FOR_dfp_dscri_td)
15106 /* Only allow 6-bit unsigned literals. */
15107 STRIP_NOPS (arg1);
15108 if (TREE_CODE (arg1) != INTEGER_CST
15109 || TREE_INT_CST_LOW (arg1) & ~0x3f)
15111 error ("argument 2 must be a 6-bit unsigned literal");
15112 return CONST0_RTX (tmode);
15115 else if (icode == CODE_FOR_crypto_vshasigmaw
15116 || icode == CODE_FOR_crypto_vshasigmad)
15118 /* Check that the 2nd and 3rd arguments are integer constants in the
15119 valid range, and prepare the arguments. */
15120 STRIP_NOPS (arg1);
15121 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
15123 error ("argument 2 must be 0 or 1");
15124 return CONST0_RTX (tmode);
15127 STRIP_NOPS (arg2);
15128 if (TREE_CODE (arg2) != INTEGER_CST
15129 || wi::geu_p (wi::to_wide (arg2), 16))
15131 error ("argument 3 must be in the range 0..15");
15132 return CONST0_RTX (tmode);
15136 if (target == 0
15137 || GET_MODE (target) != tmode
15138 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15139 target = gen_reg_rtx (tmode);
15141 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15142 op0 = copy_to_mode_reg (mode0, op0);
15143 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15144 op1 = copy_to_mode_reg (mode1, op1);
15145 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15146 op2 = copy_to_mode_reg (mode2, op2);
15148 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
15149 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
15150 else
15151 pat = GEN_FCN (icode) (target, op0, op1, op2);
15152 if (! pat)
15153 return 0;
15154 emit_insn (pat);
15156 return target;
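/* Illustrative sketch (editor's addition, not from the original source):
   the literal-operand checks above are why some intrinsics reject
   non-constant arguments at compile time.  Assuming <altivec.h> and
   -maltivec.  Guarded out of the build.  */
#if 0
#include <altivec.h>

vector int
vsldoi_example (vector int a, vector int b)
{
  /* vec_sld maps to altivec_vsldoi_v4si; its third operand must be a
     4-bit unsigned literal, so a variable here would produce the
     "argument 3 must be a 4-bit unsigned literal" error above.  */
  return vec_sld (a, b, 3);
}
#endif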
15159 /* Expand the lvx builtins. */
15160 static rtx
15161 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
15163 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15164 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15165 tree arg0;
15166 machine_mode tmode, mode0;
15167 rtx pat, op0;
15168 enum insn_code icode;
15170 switch (fcode)
15172 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
15173 icode = CODE_FOR_vector_altivec_load_v16qi;
15174 break;
15175 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
15176 icode = CODE_FOR_vector_altivec_load_v8hi;
15177 break;
15178 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
15179 icode = CODE_FOR_vector_altivec_load_v4si;
15180 break;
15181 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
15182 icode = CODE_FOR_vector_altivec_load_v4sf;
15183 break;
15184 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
15185 icode = CODE_FOR_vector_altivec_load_v2df;
15186 break;
15187 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
15188 icode = CODE_FOR_vector_altivec_load_v2di;
15189 break;
15190 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
15191 icode = CODE_FOR_vector_altivec_load_v1ti;
15192 break;
15193 default:
15194 *expandedp = false;
15195 return NULL_RTX;
15198 *expandedp = true;
15200 arg0 = CALL_EXPR_ARG (exp, 0);
15201 op0 = expand_normal (arg0);
15202 tmode = insn_data[icode].operand[0].mode;
15203 mode0 = insn_data[icode].operand[1].mode;
15205 if (target == 0
15206 || GET_MODE (target) != tmode
15207 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15208 target = gen_reg_rtx (tmode);
15210 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15211 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15213 pat = GEN_FCN (icode) (target, op0);
15214 if (! pat)
15215 return 0;
15216 emit_insn (pat);
15217 return target;
15220 /* Expand the stvx builtins. */
15221 static rtx
15222 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15223 bool *expandedp)
15225 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15226 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15227 tree arg0, arg1;
15228 machine_mode mode0, mode1;
15229 rtx pat, op0, op1;
15230 enum insn_code icode;
15232 switch (fcode)
15234 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
15235 icode = CODE_FOR_vector_altivec_store_v16qi;
15236 break;
15237 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
15238 icode = CODE_FOR_vector_altivec_store_v8hi;
15239 break;
15240 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
15241 icode = CODE_FOR_vector_altivec_store_v4si;
15242 break;
15243 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
15244 icode = CODE_FOR_vector_altivec_store_v4sf;
15245 break;
15246 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
15247 icode = CODE_FOR_vector_altivec_store_v2df;
15248 break;
15249 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
15250 icode = CODE_FOR_vector_altivec_store_v2di;
15251 break;
15252 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
15253 icode = CODE_FOR_vector_altivec_store_v1ti;
15254 break;
15255 default:
15256 *expandedp = false;
15257 return NULL_RTX;
15260 arg0 = CALL_EXPR_ARG (exp, 0);
15261 arg1 = CALL_EXPR_ARG (exp, 1);
15262 op0 = expand_normal (arg0);
15263 op1 = expand_normal (arg1);
15264 mode0 = insn_data[icode].operand[0].mode;
15265 mode1 = insn_data[icode].operand[1].mode;
15267 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15268 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15269 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15270 op1 = copy_to_mode_reg (mode1, op1);
15272 pat = GEN_FCN (icode) (op0, op1);
15273 if (pat)
15274 emit_insn (pat);
15276 *expandedp = true;
15277 return NULL_RTX;
15280 /* Expand the dst builtins. */
15281 static rtx
15282 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15283 bool *expandedp)
15285 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15286 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15287 tree arg0, arg1, arg2;
15288 machine_mode mode0, mode1;
15289 rtx pat, op0, op1, op2;
15290 const struct builtin_description *d;
15291 size_t i;
15293 *expandedp = false;
15295 /* Handle DST variants. */
15296 d = bdesc_dst;
15297 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
15298 if (d->code == fcode)
15300 arg0 = CALL_EXPR_ARG (exp, 0);
15301 arg1 = CALL_EXPR_ARG (exp, 1);
15302 arg2 = CALL_EXPR_ARG (exp, 2);
15303 op0 = expand_normal (arg0);
15304 op1 = expand_normal (arg1);
15305 op2 = expand_normal (arg2);
15306 mode0 = insn_data[d->icode].operand[0].mode;
15307 mode1 = insn_data[d->icode].operand[1].mode;
15309 /* Invalid arguments; bail out before generating bad rtl. */
15310 if (arg0 == error_mark_node
15311 || arg1 == error_mark_node
15312 || arg2 == error_mark_node)
15313 return const0_rtx;
15315 *expandedp = true;
15316 STRIP_NOPS (arg2);
15317 if (TREE_CODE (arg2) != INTEGER_CST
15318 || TREE_INT_CST_LOW (arg2) & ~0x3)
15320 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
15321 return const0_rtx;
15324 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15325 op0 = copy_to_mode_reg (Pmode, op0);
15326 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15327 op1 = copy_to_mode_reg (mode1, op1);
15329 pat = GEN_FCN (d->icode) (op0, op1, op2);
15330 if (pat != 0)
15331 emit_insn (pat);
15333 return NULL_RTX;
15336 return NULL_RTX;
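/* Illustrative sketch (editor's addition, not from the original source):
   a use of the data-stream-touch builtins handled above.  Assuming
   <altivec.h> and -maltivec.  Guarded out of the build.  */
#if 0
#include <altivec.h>

void
dst_example (const int *p, int ctl)
{
  /* The last argument selects the stream tag and must be a 2-bit
     literal; a variable there would produce the error issued above.  */
  vec_dst (p, ctl, 0);
}
#endif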
15339 /* Expand vec_init builtin. */
15340 static rtx
15341 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
15343 machine_mode tmode = TYPE_MODE (type);
15344 machine_mode inner_mode = GET_MODE_INNER (tmode);
15345 int i, n_elt = GET_MODE_NUNITS (tmode);
15347 gcc_assert (VECTOR_MODE_P (tmode));
15348 gcc_assert (n_elt == call_expr_nargs (exp));
15350 if (!target || !register_operand (target, tmode))
15351 target = gen_reg_rtx (tmode);
15353 /* If we have a vector composed of a single element, such as V1TImode, do
15354 the initialization directly. */
15355 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
15357 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
15358 emit_move_insn (target, gen_lowpart (tmode, x));
15360 else
15362 rtvec v = rtvec_alloc (n_elt);
15364 for (i = 0; i < n_elt; ++i)
15366 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
15367 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15370 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
15373 return target;
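/* Illustrative sketch (editor's addition, not from the original source):
   a vector constructor of the shape the vec_init expander above models;
   with four scalar operands the initialization funnels into
   rs6000_expand_vector_init as a V4SI init.  Assuming -maltivec.
   Guarded out of the build.  */
#if 0
vector int
init_example (int a, int b, int c, int d)
{
  vector int v = { a, b, c, d };
  return v;
}
#endif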
15376 /* Return the integer constant in ARG. Constrain it to be in the range
15377 of the subparts of VEC_TYPE; issue an error if not. */
15379 static int
15380 get_element_number (tree vec_type, tree arg)
15382 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15384 if (!tree_fits_uhwi_p (arg)
15385 || (elt = tree_to_uhwi (arg), elt > max))
15387 error ("selector must be an integer constant in the range 0..%wi", max);
15388 return 0;
15391 return elt;
15394 /* Expand vec_set builtin. */
15395 static rtx
15396 altivec_expand_vec_set_builtin (tree exp)
15398 machine_mode tmode, mode1;
15399 tree arg0, arg1, arg2;
15400 int elt;
15401 rtx op0, op1;
15403 arg0 = CALL_EXPR_ARG (exp, 0);
15404 arg1 = CALL_EXPR_ARG (exp, 1);
15405 arg2 = CALL_EXPR_ARG (exp, 2);
15407 tmode = TYPE_MODE (TREE_TYPE (arg0));
15408 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15409 gcc_assert (VECTOR_MODE_P (tmode));
15411 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
15412 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
15413 elt = get_element_number (TREE_TYPE (arg0), arg2);
15415 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15416 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15418 op0 = force_reg (tmode, op0);
15419 op1 = force_reg (mode1, op1);
15421 rs6000_expand_vector_set (op0, op1, elt);
15423 return op0;
15426 /* Expand vec_ext builtin. */
15427 static rtx
15428 altivec_expand_vec_ext_builtin (tree exp, rtx target)
15430 machine_mode tmode, mode0;
15431 tree arg0, arg1;
15432 rtx op0;
15433 rtx op1;
15435 arg0 = CALL_EXPR_ARG (exp, 0);
15436 arg1 = CALL_EXPR_ARG (exp, 1);
15438 op0 = expand_normal (arg0);
15439 op1 = expand_normal (arg1);
15441 /* Call get_element_number to validate arg1 if it is a constant. */
15442 if (TREE_CODE (arg1) == INTEGER_CST)
15443 (void) get_element_number (TREE_TYPE (arg0), arg1);
15445 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15446 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15447 gcc_assert (VECTOR_MODE_P (mode0));
15449 op0 = force_reg (mode0, op0);
15451 if (optimize || !target || !register_operand (target, tmode))
15452 target = gen_reg_rtx (tmode);
15454 rs6000_expand_vector_extract (target, op0, op1);
15456 return target;
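/* Illustrative sketch (editor's addition, not from the original source):
   the element accessors handled by the two expanders above.  Assuming
   <altivec.h> and -maltivec.  Guarded out of the build.  */
#if 0
#include <altivec.h>

int
elt_example (vector int v, int x)
{
  /* vec_insert -> altivec_expand_vec_set_builtin; a constant selector
     is validated by get_element_number.  */
  v = vec_insert (x, v, 1);
  /* vec_extract -> altivec_expand_vec_ext_builtin.  */
  return vec_extract (v, 2);
}
#endif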
15459 /* Expand the builtin in EXP and store the result in TARGET. Store
15460 true in *EXPANDEDP if we found a builtin to expand. */
15461 static rtx
15462 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
15464 const struct builtin_description *d;
15465 size_t i;
15466 enum insn_code icode;
15467 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15468 tree arg0, arg1, arg2;
15469 rtx op0, pat;
15470 machine_mode tmode, mode0;
15471 enum rs6000_builtins fcode
15472 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15474 if (rs6000_overloaded_builtin_p (fcode))
15476 *expandedp = true;
15477 error ("unresolved overload for Altivec builtin %qF", fndecl);
15479 /* Given that it is invalid, just generate a normal call. */
15480 return expand_call (exp, target, false);
15483 target = altivec_expand_ld_builtin (exp, target, expandedp);
15484 if (*expandedp)
15485 return target;
15487 target = altivec_expand_st_builtin (exp, target, expandedp);
15488 if (*expandedp)
15489 return target;
15491 target = altivec_expand_dst_builtin (exp, target, expandedp);
15492 if (*expandedp)
15493 return target;
15495 *expandedp = true;
15497 switch (fcode)
15499 case ALTIVEC_BUILTIN_STVX_V2DF:
15500 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
15501 case ALTIVEC_BUILTIN_STVX_V2DI:
15502 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
15503 case ALTIVEC_BUILTIN_STVX_V4SF:
15504 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
15505 case ALTIVEC_BUILTIN_STVX:
15506 case ALTIVEC_BUILTIN_STVX_V4SI:
15507 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
15508 case ALTIVEC_BUILTIN_STVX_V8HI:
15509 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
15510 case ALTIVEC_BUILTIN_STVX_V16QI:
15511 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
15512 case ALTIVEC_BUILTIN_STVEBX:
15513 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
15514 case ALTIVEC_BUILTIN_STVEHX:
15515 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
15516 case ALTIVEC_BUILTIN_STVEWX:
15517 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
15518 case ALTIVEC_BUILTIN_STVXL_V2DF:
15519 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
15520 case ALTIVEC_BUILTIN_STVXL_V2DI:
15521 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
15522 case ALTIVEC_BUILTIN_STVXL_V4SF:
15523 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
15524 case ALTIVEC_BUILTIN_STVXL:
15525 case ALTIVEC_BUILTIN_STVXL_V4SI:
15526 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
15527 case ALTIVEC_BUILTIN_STVXL_V8HI:
15528 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
15529 case ALTIVEC_BUILTIN_STVXL_V16QI:
15530 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
15532 case ALTIVEC_BUILTIN_STVLX:
15533 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
15534 case ALTIVEC_BUILTIN_STVLXL:
15535 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
15536 case ALTIVEC_BUILTIN_STVRX:
15537 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
15538 case ALTIVEC_BUILTIN_STVRXL:
15539 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
15541 case P9V_BUILTIN_STXVL:
15542 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
15544 case P9V_BUILTIN_XST_LEN_R:
15545 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
15547 case VSX_BUILTIN_STXVD2X_V1TI:
15548 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
15549 case VSX_BUILTIN_STXVD2X_V2DF:
15550 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
15551 case VSX_BUILTIN_STXVD2X_V2DI:
15552 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
15553 case VSX_BUILTIN_STXVW4X_V4SF:
15554 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
15555 case VSX_BUILTIN_STXVW4X_V4SI:
15556 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
15557 case VSX_BUILTIN_STXVW4X_V8HI:
15558 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
15559 case VSX_BUILTIN_STXVW4X_V16QI:
15560 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
15562 /* For the following on big endian, it's ok to use any appropriate
15563 unaligned-supporting store, so use a generic expander. For
15564 little-endian, the exact element-reversing instruction must
15565 be used. */
15566 case VSX_BUILTIN_ST_ELEMREV_V2DF:
15568 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
15569 : CODE_FOR_vsx_st_elemrev_v2df);
15570 return altivec_expand_stv_builtin (code, exp);
15572 case VSX_BUILTIN_ST_ELEMREV_V2DI:
15574 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
15575 : CODE_FOR_vsx_st_elemrev_v2di);
15576 return altivec_expand_stv_builtin (code, exp);
15578 case VSX_BUILTIN_ST_ELEMREV_V4SF:
15580 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
15581 : CODE_FOR_vsx_st_elemrev_v4sf);
15582 return altivec_expand_stv_builtin (code, exp);
15584 case VSX_BUILTIN_ST_ELEMREV_V4SI:
15586 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
15587 : CODE_FOR_vsx_st_elemrev_v4si);
15588 return altivec_expand_stv_builtin (code, exp);
15590 case VSX_BUILTIN_ST_ELEMREV_V8HI:
15592 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
15593 : CODE_FOR_vsx_st_elemrev_v8hi);
15594 return altivec_expand_stv_builtin (code, exp);
15596 case VSX_BUILTIN_ST_ELEMREV_V16QI:
15598 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
15599 : CODE_FOR_vsx_st_elemrev_v16qi);
15600 return altivec_expand_stv_builtin (code, exp);
15603 case ALTIVEC_BUILTIN_MFVSCR:
15604 icode = CODE_FOR_altivec_mfvscr;
15605 tmode = insn_data[icode].operand[0].mode;
15607 if (target == 0
15608 || GET_MODE (target) != tmode
15609 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15610 target = gen_reg_rtx (tmode);
15612 pat = GEN_FCN (icode) (target);
15613 if (! pat)
15614 return 0;
15615 emit_insn (pat);
15616 return target;
15618 case ALTIVEC_BUILTIN_MTVSCR:
15619 icode = CODE_FOR_altivec_mtvscr;
15620 arg0 = CALL_EXPR_ARG (exp, 0);
15621 op0 = expand_normal (arg0);
15622 mode0 = insn_data[icode].operand[0].mode;
15624 /* If we got invalid arguments, bail out before generating bad rtl. */
15625 if (arg0 == error_mark_node)
15626 return const0_rtx;
15628 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15629 op0 = copy_to_mode_reg (mode0, op0);
15631 pat = GEN_FCN (icode) (op0);
15632 if (pat)
15633 emit_insn (pat);
15634 return NULL_RTX;
15636 case ALTIVEC_BUILTIN_DSSALL:
15637 emit_insn (gen_altivec_dssall ());
15638 return NULL_RTX;
15640 case ALTIVEC_BUILTIN_DSS:
15641 icode = CODE_FOR_altivec_dss;
15642 arg0 = CALL_EXPR_ARG (exp, 0);
15643 STRIP_NOPS (arg0);
15644 op0 = expand_normal (arg0);
15645 mode0 = insn_data[icode].operand[0].mode;
15647 /* If we got invalid arguments, bail out before generating bad rtl. */
15648 if (arg0 == error_mark_node)
15649 return const0_rtx;
15651 if (TREE_CODE (arg0) != INTEGER_CST
15652 || TREE_INT_CST_LOW (arg0) & ~0x3)
15654 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15655 return const0_rtx;
15658 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15659 op0 = copy_to_mode_reg (mode0, op0);
15661 emit_insn (gen_altivec_dss (op0));
15662 return NULL_RTX;
15664 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
15665 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
15666 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
15667 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
15668 case VSX_BUILTIN_VEC_INIT_V2DF:
15669 case VSX_BUILTIN_VEC_INIT_V2DI:
15670 case VSX_BUILTIN_VEC_INIT_V1TI:
15671 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
15673 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15674 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15675 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15676 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15677 case VSX_BUILTIN_VEC_SET_V2DF:
15678 case VSX_BUILTIN_VEC_SET_V2DI:
15679 case VSX_BUILTIN_VEC_SET_V1TI:
15680 return altivec_expand_vec_set_builtin (exp);
15682 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15683 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15684 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15685 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15686 case VSX_BUILTIN_VEC_EXT_V2DF:
15687 case VSX_BUILTIN_VEC_EXT_V2DI:
15688 case VSX_BUILTIN_VEC_EXT_V1TI:
15689 return altivec_expand_vec_ext_builtin (exp, target);
15691 case P9V_BUILTIN_VEXTRACT4B:
15692 case P9V_BUILTIN_VEC_VEXTRACT4B:
15693 arg1 = CALL_EXPR_ARG (exp, 1);
15694 STRIP_NOPS (arg1);
15696 /* Generate a normal call if it is invalid. */
15697 if (arg1 == error_mark_node)
15698 return expand_call (exp, target, false);
15700 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15702 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15703 return expand_call (exp, target, false);
15705 break;
15707 case P9V_BUILTIN_VINSERT4B:
15708 case P9V_BUILTIN_VINSERT4B_DI:
15709 case P9V_BUILTIN_VEC_VINSERT4B:
15710 arg2 = CALL_EXPR_ARG (exp, 2);
15711 STRIP_NOPS (arg2);
15713 /* Generate a normal call if it is invalid. */
15714 if (arg2 == error_mark_node)
15715 return expand_call (exp, target, false);
15717 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15719 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15720 return expand_call (exp, target, false);
15722 break;
15724 default:
15725 break;
15726 /* Fall through. */
15729 /* Expand abs* operations. */
15730 d = bdesc_abs;
15731 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15732 if (d->code == fcode)
15733 return altivec_expand_abs_builtin (d->icode, exp, target);
15735 /* Expand the AltiVec predicates. */
15736 d = bdesc_altivec_preds;
15737 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15738 if (d->code == fcode)
15739 return altivec_expand_predicate_builtin (d->icode, exp, target);
15741 /* LV* are funky; they are initialized differently. */
15742 switch (fcode)
15744 case ALTIVEC_BUILTIN_LVSL:
15745 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15746 exp, target, false);
15747 case ALTIVEC_BUILTIN_LVSR:
15748 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15749 exp, target, false);
15750 case ALTIVEC_BUILTIN_LVEBX:
15751 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15752 exp, target, false);
15753 case ALTIVEC_BUILTIN_LVEHX:
15754 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15755 exp, target, false);
15756 case ALTIVEC_BUILTIN_LVEWX:
15757 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15758 exp, target, false);
15759 case ALTIVEC_BUILTIN_LVXL_V2DF:
15760 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15761 exp, target, false);
15762 case ALTIVEC_BUILTIN_LVXL_V2DI:
15763 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15764 exp, target, false);
15765 case ALTIVEC_BUILTIN_LVXL_V4SF:
15766 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15767 exp, target, false);
15768 case ALTIVEC_BUILTIN_LVXL:
15769 case ALTIVEC_BUILTIN_LVXL_V4SI:
15770 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15771 exp, target, false);
15772 case ALTIVEC_BUILTIN_LVXL_V8HI:
15773 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15774 exp, target, false);
15775 case ALTIVEC_BUILTIN_LVXL_V16QI:
15776 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15777 exp, target, false);
15778 case ALTIVEC_BUILTIN_LVX_V2DF:
15779 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
15780 exp, target, false);
15781 case ALTIVEC_BUILTIN_LVX_V2DI:
15782 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
15783 exp, target, false);
15784 case ALTIVEC_BUILTIN_LVX_V4SF:
15785 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
15786 exp, target, false);
15787 case ALTIVEC_BUILTIN_LVX:
15788 case ALTIVEC_BUILTIN_LVX_V4SI:
15789 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
15790 exp, target, false);
15791 case ALTIVEC_BUILTIN_LVX_V8HI:
15792 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
15793 exp, target, false);
15794 case ALTIVEC_BUILTIN_LVX_V16QI:
15795 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
15796 exp, target, false);
15797 case ALTIVEC_BUILTIN_LVLX:
15798 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15799 exp, target, true);
15800 case ALTIVEC_BUILTIN_LVLXL:
15801 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15802 exp, target, true);
15803 case ALTIVEC_BUILTIN_LVRX:
15804 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15805 exp, target, true);
15806 case ALTIVEC_BUILTIN_LVRXL:
15807 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15808 exp, target, true);
15809 case VSX_BUILTIN_LXVD2X_V1TI:
15810 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15811 exp, target, false);
15812 case VSX_BUILTIN_LXVD2X_V2DF:
15813 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15814 exp, target, false);
15815 case VSX_BUILTIN_LXVD2X_V2DI:
15816 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15817 exp, target, false);
15818 case VSX_BUILTIN_LXVW4X_V4SF:
15819 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15820 exp, target, false);
15821 case VSX_BUILTIN_LXVW4X_V4SI:
15822 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15823 exp, target, false);
15824 case VSX_BUILTIN_LXVW4X_V8HI:
15825 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15826 exp, target, false);
15827 case VSX_BUILTIN_LXVW4X_V16QI:
15828 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15829 exp, target, false);
15830 /* For the following on big endian, it's ok to use any appropriate
15831 unaligned-supporting load, so use a generic expander. For
15832 little-endian, the exact element-reversing instruction must
15833 be used. */
15834 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15836 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15837 : CODE_FOR_vsx_ld_elemrev_v2df);
15838 return altivec_expand_lv_builtin (code, exp, target, false);
15840 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15842 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15843 : CODE_FOR_vsx_ld_elemrev_v2di);
15844 return altivec_expand_lv_builtin (code, exp, target, false);
15846 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15848 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15849 : CODE_FOR_vsx_ld_elemrev_v4sf);
15850 return altivec_expand_lv_builtin (code, exp, target, false);
15852 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15854 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15855 : CODE_FOR_vsx_ld_elemrev_v4si);
15856 return altivec_expand_lv_builtin (code, exp, target, false);
15858 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15860 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15861 : CODE_FOR_vsx_ld_elemrev_v8hi);
15862 return altivec_expand_lv_builtin (code, exp, target, false);
15864 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15866 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15867 : CODE_FOR_vsx_ld_elemrev_v16qi);
15868 return altivec_expand_lv_builtin (code, exp, target, false);
15870 break;
15871 default:
15872 break;
15873 /* Fall through. */
15876 *expandedp = false;
15877 return NULL_RTX;
15880 /* Expand the builtin in EXP and store the result in TARGET. Store
15881 true in *EXPANDEDP if we found a builtin to expand. */
15882 static rtx
15883 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
15885 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15886 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15887 const struct builtin_description *d;
15888 size_t i;
15890 *expandedp = true;
15892 switch (fcode)
15894 case PAIRED_BUILTIN_STX:
15895 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
15896 case PAIRED_BUILTIN_LX:
15897 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
15898 default:
15899 break;
15900 /* Fall through. */
15903 /* Expand the paired predicates. */
15904 d = bdesc_paired_preds;
15905 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
15906 if (d->code == fcode)
15907 return paired_expand_predicate_builtin (d->icode, exp, target);
15909 *expandedp = false;
15910 return NULL_RTX;
15913 static rtx
15914 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15916 rtx pat, scratch, tmp;
15917 tree form = CALL_EXPR_ARG (exp, 0);
15918 tree arg0 = CALL_EXPR_ARG (exp, 1);
15919 tree arg1 = CALL_EXPR_ARG (exp, 2);
15920 rtx op0 = expand_normal (arg0);
15921 rtx op1 = expand_normal (arg1);
15922 machine_mode mode0 = insn_data[icode].operand[1].mode;
15923 machine_mode mode1 = insn_data[icode].operand[2].mode;
15924 int form_int;
15925 enum rtx_code code;
15927 if (TREE_CODE (form) != INTEGER_CST)
15929 error ("argument 1 of %qs must be a constant",
15930 "__builtin_paired_predicate");
15931 return const0_rtx;
15933 else
15934 form_int = TREE_INT_CST_LOW (form);
15936 gcc_assert (mode0 == mode1);
15938 if (arg0 == error_mark_node || arg1 == error_mark_node)
15939 return const0_rtx;
15941 if (target == 0
15942 || GET_MODE (target) != SImode
15943 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
15944 target = gen_reg_rtx (SImode);
15945 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
15946 op0 = copy_to_mode_reg (mode0, op0);
15947 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
15948 op1 = copy_to_mode_reg (mode1, op1);
15950 scratch = gen_reg_rtx (CCFPmode);
15952 pat = GEN_FCN (icode) (scratch, op0, op1);
15953 if (!pat)
15954 return const0_rtx;
15956 emit_insn (pat);
15958 switch (form_int)
15960 /* LT bit. */
15961 case 0:
15962 code = LT;
15963 break;
15964 /* GT bit. */
15965 case 1:
15966 code = GT;
15967 break;
15968 /* EQ bit. */
15969 case 2:
15970 code = EQ;
15971 break;
15972 /* UN bit. */
15973 case 3:
15974 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
15975 return target;
15976 default:
15977 error ("argument 1 of %qs is out of range",
15978 "__builtin_paired_predicate");
15979 return const0_rtx;
15982 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
15983 emit_move_insn (target, tmp);
15984 return target;
15987 /* Issue an error for a builtin function that is called without the
15988 appropriate target options being set. */
15990 static void
15991 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15993 size_t uns_fncode = (size_t) fncode;
15994 const char *name = rs6000_builtin_info[uns_fncode].name;
15995 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15997 gcc_assert (name != NULL);
15998 if ((fnmask & RS6000_BTM_CELL) != 0)
15999 error ("builtin function %qs is only valid for the cell processor", name);
16000 else if ((fnmask & RS6000_BTM_VSX) != 0)
16001 error ("builtin function %qs requires the %qs option", name, "-mvsx");
16002 else if ((fnmask & RS6000_BTM_HTM) != 0)
16003 error ("builtin function %qs requires the %qs option", name, "-mhtm");
16004 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
16005 error ("builtin function %qs requires the %qs option", name, "-maltivec");
16006 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
16007 error ("builtin function %qs requires the %qs option", name, "-mpaired");
16008 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16009 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16010 error ("builtin function %qs requires the %qs and %qs options",
16011 name, "-mhard-dfp", "-mpower8-vector");
16012 else if ((fnmask & RS6000_BTM_DFP) != 0)
16013 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
16014 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
16015 error ("builtin function %qs requires the %qs option", name,
16016 "-mpower8-vector");
16017 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16018 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16019 error ("builtin function %qs requires the %qs and %qs options",
16020 name, "-mcpu=power9", "-m64");
16021 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
16022 error ("builtin function %qs requires the %qs option", name,
16023 "-mcpu=power9");
16024 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16025 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16026 error ("builtin function %qs requires the %qs and %qs options",
16027 name, "-mcpu=power9", "-m64");
16028 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
16029 error ("builtin function %qs requires the %qs option", name,
16030 "-mcpu=power9");
16031 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16032 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16033 error ("builtin function %qs requires the %qs and %qs options",
16034 name, "-mhard-float", "-mlong-double-128");
16035 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
16036 error ("builtin function %qs requires the %qs option", name,
16037 "-mhard-float");
16038 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
16039 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
16040 name);
16041 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
16042 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
16043 else
16044 error ("builtin function %qs is not supported with the current options",
16045 name);
16048 /* Target hook for early folding of built-ins, shamelessly stolen
16049 from ia64.c. */
16051 static tree
16052 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
16053 int n_args ATTRIBUTE_UNUSED,
16054 tree *args ATTRIBUTE_UNUSED,
16055 bool ignore ATTRIBUTE_UNUSED)
16057 #ifdef SUBTARGET_FOLD_BUILTIN
16058 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
16059 #else
16060 return NULL_TREE;
16061 #endif
16064 /* Helper function to sort out which built-ins may be valid without
16065 an LHS. */
16066 static bool
16067 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
16069 switch (fn_code)
16071 case ALTIVEC_BUILTIN_STVX_V16QI:
16072 case ALTIVEC_BUILTIN_STVX_V8HI:
16073 case ALTIVEC_BUILTIN_STVX_V4SI:
16074 case ALTIVEC_BUILTIN_STVX_V4SF:
16075 case ALTIVEC_BUILTIN_STVX_V2DI:
16076 case ALTIVEC_BUILTIN_STVX_V2DF:
16077 return true;
16078 default:
16079 return false;
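/* Illustrative sketch (editor's addition, not from the original source):
   the STVX builtins are stores, so their GIMPLE calls have no LHS;
   without the helper above, rs6000_gimple_fold_builtin would decline to
   fold them.  Assuming <altivec.h> and -maltivec.  Guarded out of the
   build.  */
#if 0
#include <altivec.h>

void
stvx_example (vector int v, vector int *p)
{
  /* vec_st maps to an ALTIVEC_BUILTIN_STVX_* call used as a statement
     with no left-hand side.  */
  vec_st (v, 0, p);
}
#endif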
16083 /* Helper function to handle the gimple folding of a vector compare
16084 operation. This sets up true/false vectors, and uses the
16085 VEC_COND_EXPR operation.
16086 CODE indicates which comparison is to be made (EQ, GT, ...).
16087 TYPE indicates the type of the result. */
16088 static tree
16089 fold_build_vec_cmp (tree_code code, tree type,
16090 tree arg0, tree arg1)
16092 tree cmp_type = build_same_sized_truth_vector_type (type);
16093 tree zero_vec = build_zero_cst (type);
16094 tree minus_one_vec = build_minus_one_cst (type);
16095 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
16096 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
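/* Illustrative note (editor's addition, not from the original source):
   for a V4SI comparison the helper above builds, in effect,

     lhs = VEC_COND_EXPR <arg0 CODE arg1, { -1,-1,-1,-1 }, { 0,0,0,0 }>

   so each true lane becomes all-ones and each false lane all-zeros,
   matching the AltiVec predicate-vector convention.  */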
16099 /* Helper function to handle the in-between steps for the
16100 vector compare built-ins. */
16101 static void
16102 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
16104 tree arg0 = gimple_call_arg (stmt, 0);
16105 tree arg1 = gimple_call_arg (stmt, 1);
16106 tree lhs = gimple_call_lhs (stmt);
16107 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
16108 gimple *g = gimple_build_assign (lhs, cmp);
16109 gimple_set_location (g, gimple_location (stmt));
16110 gsi_replace (gsi, g, true);
16113 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
16114 a constant, use rs6000_fold_builtin.) */
16116 bool
16117 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
16119 gimple *stmt = gsi_stmt (*gsi);
16120 tree fndecl = gimple_call_fndecl (stmt);
16121 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
16122 enum rs6000_builtins fn_code
16123 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16124 tree arg0, arg1, lhs, temp;
16125 gimple *g;
16127 size_t uns_fncode = (size_t) fn_code;
16128 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
16129 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
16130 const char *fn_name2 = (icode != CODE_FOR_nothing)
16131 ? get_insn_name ((int) icode)
16132 : "nothing";
16134 if (TARGET_DEBUG_BUILTIN)
16135 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
16136 fn_code, fn_name1, fn_name2);
16138 if (!rs6000_fold_gimple)
16139 return false;
16141 /* Prevent gimple folding for code that does not have an LHS, unless it is
16142 allowed per the rs6000_builtin_valid_without_lhs helper function. */
16143 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
16144 return false;
16146 /* Don't fold invalid builtins; let rs6000_expand_builtin diagnose them. */
16147 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
16148 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
16149 if (!func_valid_p)
16150 return false;
16152 switch (fn_code)
16154 /* Flavors of vec_add. We deliberately don't expand
16155 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
16156 TImode, resulting in much poorer code generation. */
16157 case ALTIVEC_BUILTIN_VADDUBM:
16158 case ALTIVEC_BUILTIN_VADDUHM:
16159 case ALTIVEC_BUILTIN_VADDUWM:
16160 case P8V_BUILTIN_VADDUDM:
16161 case ALTIVEC_BUILTIN_VADDFP:
16162 case VSX_BUILTIN_XVADDDP:
16163 arg0 = gimple_call_arg (stmt, 0);
16164 arg1 = gimple_call_arg (stmt, 1);
16165 lhs = gimple_call_lhs (stmt);
16166 g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
16167 gimple_set_location (g, gimple_location (stmt));
16168 gsi_replace (gsi, g, true);
16169 return true;
16170 /* Flavors of vec_sub. We deliberately don't expand
16171 P8V_BUILTIN_VSUBUQM. */
16172 case ALTIVEC_BUILTIN_VSUBUBM:
16173 case ALTIVEC_BUILTIN_VSUBUHM:
16174 case ALTIVEC_BUILTIN_VSUBUWM:
16175 case P8V_BUILTIN_VSUBUDM:
16176 case ALTIVEC_BUILTIN_VSUBFP:
16177 case VSX_BUILTIN_XVSUBDP:
16178 arg0 = gimple_call_arg (stmt, 0);
16179 arg1 = gimple_call_arg (stmt, 1);
16180 lhs = gimple_call_lhs (stmt);
16181 g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
16182 gimple_set_location (g, gimple_location (stmt));
16183 gsi_replace (gsi, g, true);
16184 return true;
16185 case VSX_BUILTIN_XVMULSP:
16186 case VSX_BUILTIN_XVMULDP:
16187 arg0 = gimple_call_arg (stmt, 0);
16188 arg1 = gimple_call_arg (stmt, 1);
16189 lhs = gimple_call_lhs (stmt);
16190 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
16191 gimple_set_location (g, gimple_location (stmt));
16192 gsi_replace (gsi, g, true);
16193 return true;
16194 /* Even element flavors of vec_mul (signed). */
16195 case ALTIVEC_BUILTIN_VMULESB:
16196 case ALTIVEC_BUILTIN_VMULESH:
16197 case ALTIVEC_BUILTIN_VMULESW:
16198 /* Even element flavors of vec_mul (unsigned). */
16199 case ALTIVEC_BUILTIN_VMULEUB:
16200 case ALTIVEC_BUILTIN_VMULEUH:
16201 case ALTIVEC_BUILTIN_VMULEUW:
16202 arg0 = gimple_call_arg (stmt, 0);
16203 arg1 = gimple_call_arg (stmt, 1);
16204 lhs = gimple_call_lhs (stmt);
16205 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
16206 gimple_set_location (g, gimple_location (stmt));
16207 gsi_replace (gsi, g, true);
16208 return true;
16209 /* Odd element flavors of vec_mul (signed). */
16210 case ALTIVEC_BUILTIN_VMULOSB:
16211 case ALTIVEC_BUILTIN_VMULOSH:
16212 case ALTIVEC_BUILTIN_VMULOSW:
16213 /* Odd element flavors of vec_mul (unsigned). */
16214 case ALTIVEC_BUILTIN_VMULOUB:
16215 case ALTIVEC_BUILTIN_VMULOUH:
16216 case ALTIVEC_BUILTIN_VMULOUW:
16217 arg0 = gimple_call_arg (stmt, 0);
16218 arg1 = gimple_call_arg (stmt, 1);
16219 lhs = gimple_call_lhs (stmt);
16220 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
16221 gimple_set_location (g, gimple_location (stmt));
16222 gsi_replace (gsi, g, true);
16223 return true;
16224 /* Flavors of vec_div (Integer). */
16225 case VSX_BUILTIN_DIV_V2DI:
16226 case VSX_BUILTIN_UDIV_V2DI:
16227 arg0 = gimple_call_arg (stmt, 0);
16228 arg1 = gimple_call_arg (stmt, 1);
16229 lhs = gimple_call_lhs (stmt);
16230 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
16231 gimple_set_location (g, gimple_location (stmt));
16232 gsi_replace (gsi, g, true);
16233 return true;
16234 /* Flavors of vec_div (Float). */
16235 case VSX_BUILTIN_XVDIVSP:
16236 case VSX_BUILTIN_XVDIVDP:
16237 arg0 = gimple_call_arg (stmt, 0);
16238 arg1 = gimple_call_arg (stmt, 1);
16239 lhs = gimple_call_lhs (stmt);
16240 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
16241 gimple_set_location (g, gimple_location (stmt));
16242 gsi_replace (gsi, g, true);
16243 return true;
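/* Note the distinction above: the integer flavors fold to
   TRUNC_DIV_EXPR (truncating division), while the float flavors fold
   to RDIV_EXPR, GIMPLE's real (floating-point) division code.  */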
16244 /* Flavors of vec_and. */
16245 case ALTIVEC_BUILTIN_VAND:
16246 arg0 = gimple_call_arg (stmt, 0);
16247 arg1 = gimple_call_arg (stmt, 1);
16248 lhs = gimple_call_lhs (stmt);
16249 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
16250 gimple_set_location (g, gimple_location (stmt));
16251 gsi_replace (gsi, g, true);
16252 return true;
16253 /* Flavors of vec_andc. */
16254 case ALTIVEC_BUILTIN_VANDC:
16255 arg0 = gimple_call_arg (stmt, 0);
16256 arg1 = gimple_call_arg (stmt, 1);
16257 lhs = gimple_call_lhs (stmt);
16258 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16259 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16260 gimple_set_location (g, gimple_location (stmt));
16261 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16262 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
16263 gimple_set_location (g, gimple_location (stmt));
16264 gsi_replace (gsi, g, true);
16265 return true;
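/* Sketch of the two-statement expansion above (temporary name invented):
     t = ~b;
     lhs = a & t;
   i.e. vec_andc (a, b) computes a & ~b.  The nand, orc, nor and eqv
   cases below follow the same insert-then-replace pattern.  */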
16266 /* Flavors of vec_nand. */
16267 case P8V_BUILTIN_VEC_NAND:
16268 case P8V_BUILTIN_NAND_V16QI:
16269 case P8V_BUILTIN_NAND_V8HI:
16270 case P8V_BUILTIN_NAND_V4SI:
16271 case P8V_BUILTIN_NAND_V4SF:
16272 case P8V_BUILTIN_NAND_V2DF:
16273 case P8V_BUILTIN_NAND_V2DI:
16274 arg0 = gimple_call_arg (stmt, 0);
16275 arg1 = gimple_call_arg (stmt, 1);
16276 lhs = gimple_call_lhs (stmt);
16277 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16278 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
16279 gimple_set_location (g, gimple_location (stmt));
16280 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16281 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16282 gimple_set_location (g, gimple_location (stmt));
16283 gsi_replace (gsi, g, true);
16284 return true;
16285 /* Flavors of vec_or. */
16286 case ALTIVEC_BUILTIN_VOR:
16287 arg0 = gimple_call_arg (stmt, 0);
16288 arg1 = gimple_call_arg (stmt, 1);
16289 lhs = gimple_call_lhs (stmt);
16290 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
16291 gimple_set_location (g, gimple_location (stmt));
16292 gsi_replace (gsi, g, true);
16293 return true;
16294 /* Flavors of vec_orc. */
16295 case P8V_BUILTIN_ORC_V16QI:
16296 case P8V_BUILTIN_ORC_V8HI:
16297 case P8V_BUILTIN_ORC_V4SI:
16298 case P8V_BUILTIN_ORC_V4SF:
16299 case P8V_BUILTIN_ORC_V2DF:
16300 case P8V_BUILTIN_ORC_V2DI:
16301 arg0 = gimple_call_arg (stmt, 0);
16302 arg1 = gimple_call_arg (stmt, 1);
16303 lhs = gimple_call_lhs (stmt);
16304 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16305 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16306 gimple_set_location (g, gimple_location (stmt));
16307 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16308 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
16309 gimple_set_location (g, gimple_location (stmt));
16310 gsi_replace (gsi, g, true);
16311 return true;
16312 /* Flavors of vec_xor. */
16313 case ALTIVEC_BUILTIN_VXOR:
16314 arg0 = gimple_call_arg (stmt, 0);
16315 arg1 = gimple_call_arg (stmt, 1);
16316 lhs = gimple_call_lhs (stmt);
16317 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
16318 gimple_set_location (g, gimple_location (stmt));
16319 gsi_replace (gsi, g, true);
16320 return true;
16321 /* Flavors of vec_nor. */
16322 case ALTIVEC_BUILTIN_VNOR:
16323 arg0 = gimple_call_arg (stmt, 0);
16324 arg1 = gimple_call_arg (stmt, 1);
16325 lhs = gimple_call_lhs (stmt);
16326 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16327 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
16328 gimple_set_location (g, gimple_location (stmt));
16329 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16330 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16331 gimple_set_location (g, gimple_location (stmt));
16332 gsi_replace (gsi, g, true);
16333 return true;
16334 /* Flavors of vec_abs. */
16335 case ALTIVEC_BUILTIN_ABS_V16QI:
16336 case ALTIVEC_BUILTIN_ABS_V8HI:
16337 case ALTIVEC_BUILTIN_ABS_V4SI:
16338 case ALTIVEC_BUILTIN_ABS_V4SF:
16339 case P8V_BUILTIN_ABS_V2DI:
16340 case VSX_BUILTIN_XVABSDP:
16341 arg0 = gimple_call_arg (stmt, 0);
16342 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16343 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16344 return false;
16345 lhs = gimple_call_lhs (stmt);
16346 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
16347 gimple_set_location (g, gimple_location (stmt));
16348 gsi_replace (gsi, g, true);
16349 return true;
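/* The guard above refuses to fold signed integer cases where overflow
   is undefined, since ABS_EXPR of the most negative element value
   (e.g. abs of INT_MIN) would overflow (reasoning sketch).  */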
16350 /* Flavors of vec_min. */
16351 case VSX_BUILTIN_XVMINDP:
16352 case P8V_BUILTIN_VMINSD:
16353 case P8V_BUILTIN_VMINUD:
16354 case ALTIVEC_BUILTIN_VMINSB:
16355 case ALTIVEC_BUILTIN_VMINSH:
16356 case ALTIVEC_BUILTIN_VMINSW:
16357 case ALTIVEC_BUILTIN_VMINUB:
16358 case ALTIVEC_BUILTIN_VMINUH:
16359 case ALTIVEC_BUILTIN_VMINUW:
16360 case ALTIVEC_BUILTIN_VMINFP:
16361 arg0 = gimple_call_arg (stmt, 0);
16362 arg1 = gimple_call_arg (stmt, 1);
16363 lhs = gimple_call_lhs (stmt);
16364 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
16365 gimple_set_location (g, gimple_location (stmt));
16366 gsi_replace (gsi, g, true);
16367 return true;
16368 /* Flavors of vec_max. */
16369 case VSX_BUILTIN_XVMAXDP:
16370 case P8V_BUILTIN_VMAXSD:
16371 case P8V_BUILTIN_VMAXUD:
16372 case ALTIVEC_BUILTIN_VMAXSB:
16373 case ALTIVEC_BUILTIN_VMAXSH:
16374 case ALTIVEC_BUILTIN_VMAXSW:
16375 case ALTIVEC_BUILTIN_VMAXUB:
16376 case ALTIVEC_BUILTIN_VMAXUH:
16377 case ALTIVEC_BUILTIN_VMAXUW:
16378 case ALTIVEC_BUILTIN_VMAXFP:
16379 arg0 = gimple_call_arg (stmt, 0);
16380 arg1 = gimple_call_arg (stmt, 1);
16381 lhs = gimple_call_lhs (stmt);
16382 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
16383 gimple_set_location (g, gimple_location (stmt));
16384 gsi_replace (gsi, g, true);
16385 return true;
16386 /* Flavors of vec_eqv. */
16387 case P8V_BUILTIN_EQV_V16QI:
16388 case P8V_BUILTIN_EQV_V8HI:
16389 case P8V_BUILTIN_EQV_V4SI:
16390 case P8V_BUILTIN_EQV_V4SF:
16391 case P8V_BUILTIN_EQV_V2DF:
16392 case P8V_BUILTIN_EQV_V2DI:
16393 arg0 = gimple_call_arg (stmt, 0);
16394 arg1 = gimple_call_arg (stmt, 1);
16395 lhs = gimple_call_lhs (stmt);
16396 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16397 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
16398 gimple_set_location (g, gimple_location (stmt));
16399 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16400 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16401 gimple_set_location (g, gimple_location (stmt));
16402 gsi_replace (gsi, g, true);
16403 return true;
16404 /* Flavors of vec_rotate_left. */
16405 case ALTIVEC_BUILTIN_VRLB:
16406 case ALTIVEC_BUILTIN_VRLH:
16407 case ALTIVEC_BUILTIN_VRLW:
16408 case P8V_BUILTIN_VRLD:
16409 arg0 = gimple_call_arg (stmt, 0);
16410 arg1 = gimple_call_arg (stmt, 1);
16411 lhs = gimple_call_lhs (stmt);
16412 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
16413 gimple_set_location (g, gimple_location (stmt));
16414 gsi_replace (gsi, g, true);
16415 return true;
16416 /* Flavors of vector shift right algebraic.
16417 vec_sra{b,h,w} -> vsra{b,h,w}. */
16418 case ALTIVEC_BUILTIN_VSRAB:
16419 case ALTIVEC_BUILTIN_VSRAH:
16420 case ALTIVEC_BUILTIN_VSRAW:
16421 case P8V_BUILTIN_VSRAD:
16422 arg0 = gimple_call_arg (stmt, 0);
16423 arg1 = gimple_call_arg (stmt, 1);
16424 lhs = gimple_call_lhs (stmt);
16425 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
16426 gimple_set_location (g, gimple_location (stmt));
16427 gsi_replace (gsi, g, true);
16428 return true;
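/* For the signed element types these builtins use, RSHIFT_EXPR is an
   arithmetic (sign-propagating) shift in GIMPLE, which is exactly the
   "algebraic" vsra behavior, so the operands can be used directly;
   contrast the logical-shift-right case below.  */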
16429 /* Flavors of vector shift left.
16430 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
16431 case ALTIVEC_BUILTIN_VSLB:
16432 case ALTIVEC_BUILTIN_VSLH:
16433 case ALTIVEC_BUILTIN_VSLW:
16434 case P8V_BUILTIN_VSLD:
16435 arg0 = gimple_call_arg (stmt, 0);
16436 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16437 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16438 return false;
16439 arg1 = gimple_call_arg (stmt, 1);
16440 lhs = gimple_call_lhs (stmt);
16441 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
16442 gimple_set_location (g, gimple_location (stmt));
16443 gsi_replace (gsi, g, true);
16444 return true;
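/* As with vec_abs, the guard above skips signed element types with
   undefined overflow: LSHIFT_EXPR may then invoke undefined behavior
   in GIMPLE where the vsl instructions simply wrap (reasoning
   sketch).  */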
16445 /* Flavors of vector shift right. */
16446 case ALTIVEC_BUILTIN_VSRB:
16447 case ALTIVEC_BUILTIN_VSRH:
16448 case ALTIVEC_BUILTIN_VSRW:
16449 case P8V_BUILTIN_VSRD:
16451 arg0 = gimple_call_arg (stmt, 0);
16452 arg1 = gimple_call_arg (stmt, 1);
16453 lhs = gimple_call_lhs (stmt);
16454 gimple_seq stmts = NULL;
16455 /* Convert arg0 to unsigned. */
16456 tree arg0_unsigned
16457 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
16458 unsigned_type_for (TREE_TYPE (arg0)), arg0);
16459 tree res
16460 = gimple_build (&stmts, RSHIFT_EXPR,
16461 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
16462 /* Convert result back to the lhs type. */
16463 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
16464 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16465 update_call_from_tree (gsi, res);
16466 return true;
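/* Why the conversions above are needed: RSHIFT_EXPR on a signed operand
   is an arithmetic shift in GIMPLE, but these builtins are logical
   shifts.  VIEW_CONVERTing arg0 to the matching unsigned vector type
   forces a zero-filling shift, and the result is converted back to the
   type of the lhs (reasoning sketch).  */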
16468 /* Vector loads. */
16469 case ALTIVEC_BUILTIN_LVX_V16QI:
16470 case ALTIVEC_BUILTIN_LVX_V8HI:
16471 case ALTIVEC_BUILTIN_LVX_V4SI:
16472 case ALTIVEC_BUILTIN_LVX_V4SF:
16473 case ALTIVEC_BUILTIN_LVX_V2DI:
16474 case ALTIVEC_BUILTIN_LVX_V2DF:
16476 arg0 = gimple_call_arg (stmt, 0); /* Offset. */
16477 arg1 = gimple_call_arg (stmt, 1); /* Address. */
16478 /* Do not fold for -maltivec=be on LE targets. */
16479 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16480 return false;
16481 lhs = gimple_call_lhs (stmt);
16482 location_t loc = gimple_location (stmt);
16483 /* Since arg1 may be cast to a different type, just use ptr_type_node
16484 here instead of trying to enforce TBAA on pointer types. */
16485 tree arg1_type = ptr_type_node;
16486 tree lhs_type = TREE_TYPE (lhs);
16487 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16488 the tree using the value from arg0. The resulting type will match
16489 the type of arg1. */
16490 gimple_seq stmts = NULL;
16491 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
16492 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16493 arg1_type, arg1, temp_offset);
16494 /* Mask off any lower bits from the address. */
16495 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
16496 arg1_type, temp_addr,
16497 build_int_cst (arg1_type, -16));
16498 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16499 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
16500 take an offset, but since we've already incorporated the offset
16501 above, here we just pass in a zero. */
16502 gimple *g
16503 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
16504 build_int_cst (arg1_type, 0)));
16505 gimple_set_location (g, loc);
16506 gsi_replace (gsi, g, true);
16507 return true;
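/* Illustrative GIMPLE produced by the load folding above (SSA names
   invented):
     _1 = (sizetype) offset;
     _2 = base p+ _1;
     _3 = _2 & -16;
     lhs = MEM[(vector type *)_3];
   The mask by -16 mirrors lvx, which ignores the low four address
   bits.  */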
16509 /* Vector stores. */
16510 case ALTIVEC_BUILTIN_STVX_V16QI:
16511 case ALTIVEC_BUILTIN_STVX_V8HI:
16512 case ALTIVEC_BUILTIN_STVX_V4SI:
16513 case ALTIVEC_BUILTIN_STVX_V4SF:
16514 case ALTIVEC_BUILTIN_STVX_V2DI:
16515 case ALTIVEC_BUILTIN_STVX_V2DF:
16517 /* Do not fold for -maltivec=be on LE targets. */
16518 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16519 return false;
16520 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
16521 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
16522 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
16523 location_t loc = gimple_location (stmt);
16524 tree arg0_type = TREE_TYPE (arg0);
16525 /* Use ptr_type_node (no TBAA) for the arg2_type.
16526 FIXME: (Richard) "A proper fix would be to transition this type as
16527 seen from the frontend to GIMPLE, for example in a similar way we
16528 do for MEM_REFs by piggy-backing that on an extra argument, a
16529 constant zero pointer of the alias pointer type to use (which would
16530 also serve as a type indicator of the store itself). I'd use a
16531 target specific internal function for this (not sure if we can have
16532 those target specific, but I guess if it's folded away then that's
16533 fine) and get away with the overload set." */
16534 tree arg2_type = ptr_type_node;
16535 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16536 the tree using the value from arg1. The resulting type will match
16537 the type of arg2. */
16538 gimple_seq stmts = NULL;
16539 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
16540 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16541 arg2_type, arg2, temp_offset);
16542 /* Mask off any lower bits from the address. */
16543 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
16544 arg2_type, temp_addr,
16545 build_int_cst (arg2_type, -16));
16546 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16547 /* The desired gimple result should be similar to:
16548 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
16549 gimple *g
16550 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
16551 build_int_cst (arg2_type, 0)), arg0);
16552 gimple_set_location (g, loc);
16553 gsi_replace (gsi, g, true);
16554 return true;
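/* The store folding mirrors the load case above: the 16-byte-aligned
   address is computed the same way, and the call is replaced by
     MEM[(vector type *)aligned_addr] = value;
   (sketch; the type spelling is illustrative).  */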
16557 /* Vector Fused multiply-add (fma). */
16558 case ALTIVEC_BUILTIN_VMADDFP:
16559 case VSX_BUILTIN_XVMADDDP:
16560 case ALTIVEC_BUILTIN_VMLADDUHM:
16562 arg0 = gimple_call_arg (stmt, 0);
16563 arg1 = gimple_call_arg (stmt, 1);
16564 tree arg2 = gimple_call_arg (stmt, 2);
16565 lhs = gimple_call_lhs (stmt);
16566 gimple *g = gimple_build_assign (lhs, FMA_EXPR, arg0, arg1, arg2);
16567 gimple_set_location (g, gimple_location (stmt));
16568 gsi_replace (gsi, g, true);
16569 return true;
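/* Sketch: the three-operand builtins above fold to a single
     lhs = FMA_EXPR <a, b, c>;
   i.e. a * b + c expressed as one ternary operation in GIMPLE.  */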
16572 /* Vector compares; EQ, NE, GE, GT, LE. */
16573 case ALTIVEC_BUILTIN_VCMPEQUB:
16574 case ALTIVEC_BUILTIN_VCMPEQUH:
16575 case ALTIVEC_BUILTIN_VCMPEQUW:
16576 case P8V_BUILTIN_VCMPEQUD:
16577 fold_compare_helper (gsi, EQ_EXPR, stmt);
16578 return true;
16580 case P9V_BUILTIN_CMPNEB:
16581 case P9V_BUILTIN_CMPNEH:
16582 case P9V_BUILTIN_CMPNEW:
16583 fold_compare_helper (gsi, NE_EXPR, stmt);
16584 return true;
16586 case VSX_BUILTIN_CMPGE_16QI:
16587 case VSX_BUILTIN_CMPGE_U16QI:
16588 case VSX_BUILTIN_CMPGE_8HI:
16589 case VSX_BUILTIN_CMPGE_U8HI:
16590 case VSX_BUILTIN_CMPGE_4SI:
16591 case VSX_BUILTIN_CMPGE_U4SI:
16592 case VSX_BUILTIN_CMPGE_2DI:
16593 case VSX_BUILTIN_CMPGE_U2DI:
16594 fold_compare_helper (gsi, GE_EXPR, stmt);
16595 return true;
16597 case ALTIVEC_BUILTIN_VCMPGTSB:
16598 case ALTIVEC_BUILTIN_VCMPGTUB:
16599 case ALTIVEC_BUILTIN_VCMPGTSH:
16600 case ALTIVEC_BUILTIN_VCMPGTUH:
16601 case ALTIVEC_BUILTIN_VCMPGTSW:
16602 case ALTIVEC_BUILTIN_VCMPGTUW:
16603 case P8V_BUILTIN_VCMPGTUD:
16604 case P8V_BUILTIN_VCMPGTSD:
16605 fold_compare_helper (gsi, GT_EXPR, stmt);
16606 return true;
16608 case VSX_BUILTIN_CMPLE_16QI:
16609 case VSX_BUILTIN_CMPLE_U16QI:
16610 case VSX_BUILTIN_CMPLE_8HI:
16611 case VSX_BUILTIN_CMPLE_U8HI:
16612 case VSX_BUILTIN_CMPLE_4SI:
16613 case VSX_BUILTIN_CMPLE_U4SI:
16614 case VSX_BUILTIN_CMPLE_2DI:
16615 case VSX_BUILTIN_CMPLE_U2DI:
16616 fold_compare_helper (gsi, LE_EXPR, stmt);
16617 return true;
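/* fold_compare_helper (presumably defined earlier in this file) is
   expected to rewrite each call above into a vector comparison whose
   lanes are all-ones where the predicate holds and all-zeros elsewhere,
   roughly
     t = a <cmp> b;
     lhs = VEC_COND_EXPR <t, {-1,...}, {0,...}>;
   (a sketch of the intent, not a copy of the helper).  */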
16619 default:
16620 if (TARGET_DEBUG_BUILTIN)
16621 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16622 fn_code, fn_name1, fn_name2);
16623 break;
16626 return false;
16629 /* Expand an expression EXP that calls a built-in function,
16630 with result going to TARGET if that's convenient
16631 (and in mode MODE if that's convenient).
16632 SUBTARGET may be used as the target for computing one of EXP's operands.
16633 IGNORE is nonzero if the value is to be ignored. */
16635 static rtx
16636 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16637 machine_mode mode ATTRIBUTE_UNUSED,
16638 int ignore ATTRIBUTE_UNUSED)
16640 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16641 enum rs6000_builtins fcode
16642 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16643 size_t uns_fcode = (size_t)fcode;
16644 const struct builtin_description *d;
16645 size_t i;
16646 rtx ret;
16647 bool success;
16648 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16649 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16650 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16652 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16653 floating point type, depending on whether long double is the IBM extended
16654 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16655 we only define one variant of the built-in function, and switch the code
16656 when defining it, rather than defining two built-ins and using the
16657 overload table in rs6000-c.c to switch between the two. If we don't have
16658 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16659 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16660 #ifdef HAVE_AS_POWER9
16661 if (FLOAT128_IEEE_P (TFmode))
16662 switch (icode)
16664 default:
16665 break;
16667 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16668 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16669 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16670 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16671 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16672 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16673 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16674 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16675 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16676 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16677 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16678 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16679 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16681 #endif
16683 if (TARGET_DEBUG_BUILTIN)
16685 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16686 const char *name2 = (icode != CODE_FOR_nothing)
16687 ? get_insn_name ((int) icode)
16688 : "nothing";
16689 const char *name3;
16691 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16693 default: name3 = "unknown"; break;
16694 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16695 case RS6000_BTC_UNARY: name3 = "unary"; break;
16696 case RS6000_BTC_BINARY: name3 = "binary"; break;
16697 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16698 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16699 case RS6000_BTC_ABS: name3 = "abs"; break;
16700 case RS6000_BTC_DST: name3 = "dst"; break;
16704 fprintf (stderr,
16705 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16706 (name1) ? name1 : "---", fcode,
16707 (name2) ? name2 : "---", (int) icode,
16708 name3,
16709 func_valid_p ? "" : ", not valid");
16712 if (!func_valid_p)
16714 rs6000_invalid_builtin (fcode);
16716 /* Given it is invalid, just generate a normal call. */
16717 return expand_call (exp, target, ignore);
16720 switch (fcode)
16722 case RS6000_BUILTIN_RECIP:
16723 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16725 case RS6000_BUILTIN_RECIPF:
16726 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16728 case RS6000_BUILTIN_RSQRTF:
16729 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16731 case RS6000_BUILTIN_RSQRT:
16732 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16734 case POWER7_BUILTIN_BPERMD:
16735 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16736 ? CODE_FOR_bpermd_di
16737 : CODE_FOR_bpermd_si), exp, target);
16739 case RS6000_BUILTIN_GET_TB:
16740 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16741 target);
16743 case RS6000_BUILTIN_MFTB:
16744 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16745 ? CODE_FOR_rs6000_mftb_di
16746 : CODE_FOR_rs6000_mftb_si),
16747 target);
16749 case RS6000_BUILTIN_MFFS:
16750 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16752 case RS6000_BUILTIN_MTFSF:
16753 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16755 case RS6000_BUILTIN_CPU_INIT:
16756 case RS6000_BUILTIN_CPU_IS:
16757 case RS6000_BUILTIN_CPU_SUPPORTS:
16758 return cpu_expand_builtin (fcode, exp, target);
16760 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16761 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16763 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16764 : (int) CODE_FOR_altivec_lvsl_direct);
16765 machine_mode tmode = insn_data[icode2].operand[0].mode;
16766 machine_mode mode = insn_data[icode2].operand[1].mode;
16767 tree arg;
16768 rtx op, addr, pat;
16770 gcc_assert (TARGET_ALTIVEC);
16772 arg = CALL_EXPR_ARG (exp, 0);
16773 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16774 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16775 addr = memory_address (mode, op);
16776 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16777 op = addr;
16778 else
16780 /* For the load case we need to negate the address. */
16781 op = gen_reg_rtx (GET_MODE (addr));
16782 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16784 op = gen_rtx_MEM (mode, op);
16786 if (target == 0
16787 || GET_MODE (target) != tmode
16788 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16789 target = gen_reg_rtx (tmode);
16791 pat = GEN_FCN (icode2) (target, op);
16792 if (!pat)
16793 return 0;
16794 emit_insn (pat);
16796 return target;
16799 case ALTIVEC_BUILTIN_VCFUX:
16800 case ALTIVEC_BUILTIN_VCFSX:
16801 case ALTIVEC_BUILTIN_VCTUXS:
16802 case ALTIVEC_BUILTIN_VCTSXS:
16803 /* FIXME: There's got to be a nicer way to handle this case than
16804 constructing a new CALL_EXPR. */
16805 if (call_expr_nargs (exp) == 1)
16807 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16808 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16810 break;
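/* E.g. a one-argument call such as vec_ctf (v) is rebuilt here as the
   two-argument form vec_ctf (v, 0), appending a zero scale factor so
   the generic expansion paths below always see two operands.  */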
16812 default:
16813 break;
16816 if (TARGET_ALTIVEC)
16818 ret = altivec_expand_builtin (exp, target, &success);
16820 if (success)
16821 return ret;
16823 if (TARGET_PAIRED_FLOAT)
16825 ret = paired_expand_builtin (exp, target, &success);
16827 if (success)
16828 return ret;
16830 if (TARGET_HTM)
16832 ret = htm_expand_builtin (exp, target, &success);
16834 if (success)
16835 return ret;
16838 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16839 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16840 gcc_assert (attr == RS6000_BTC_UNARY
16841 || attr == RS6000_BTC_BINARY
16842 || attr == RS6000_BTC_TERNARY
16843 || attr == RS6000_BTC_SPECIAL);
16845 /* Handle simple unary operations. */
16846 d = bdesc_1arg;
16847 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16848 if (d->code == fcode)
16849 return rs6000_expand_unop_builtin (icode, exp, target);
16851 /* Handle simple binary operations. */
16852 d = bdesc_2arg;
16853 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16854 if (d->code == fcode)
16855 return rs6000_expand_binop_builtin (icode, exp, target);
16857 /* Handle simple ternary operations. */
16858 d = bdesc_3arg;
16859 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16860 if (d->code == fcode)
16861 return rs6000_expand_ternop_builtin (icode, exp, target);
16863 /* Handle simple no-argument operations. */
16864 d = bdesc_0arg;
16865 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16866 if (d->code == fcode)
16867 return rs6000_expand_zeroop_builtin (icode, target);
16869 gcc_unreachable ();
16872 /* Create a builtin vector type with a name, taking care not to give
16873 the canonical type a name. */
16875 static tree
16876 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16878 tree result = build_vector_type (elt_type, num_elts);
16880 /* Copy so we don't give the canonical type a name. */
16881 result = build_variant_type_copy (result);
16883 add_builtin_type (name, result);
16885 return result;
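/* Usage sketch, mirroring the calls in rs6000_init_builtins below:
     V4SF_type_node = rs6000_vector_type ("__vector float",
                                          float_type_node, 4);
   Only the variant copy receives the name, so the canonical vector
   type used for type equivalence stays anonymous.  */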
16888 static void
16889 rs6000_init_builtins (void)
16891 tree tdecl;
16892 tree ftype;
16893 machine_mode mode;
16895 if (TARGET_DEBUG_BUILTIN)
16896 fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
16897 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
16898 (TARGET_ALTIVEC) ? ", altivec" : "",
16899 (TARGET_VSX) ? ", vsx" : "");
16901 V2SI_type_node = build_vector_type (intSI_type_node, 2);
16902 V2SF_type_node = build_vector_type (float_type_node, 2);
16903 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16904 : "__vector long long",
16905 intDI_type_node, 2);
16906 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16907 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16908 intSI_type_node, 4);
16909 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16910 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16911 intHI_type_node, 8);
16912 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16913 intQI_type_node, 16);
16915 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16916 unsigned_intQI_type_node, 16);
16917 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16918 unsigned_intHI_type_node, 8);
16919 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16920 unsigned_intSI_type_node, 4);
16921 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16922 ? "__vector unsigned long"
16923 : "__vector unsigned long long",
16924 unsigned_intDI_type_node, 2);
16926 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
16927 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
16928 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
16929 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16931 const_str_type_node
16932 = build_pointer_type (build_qualified_type (char_type_node,
16933 TYPE_QUAL_CONST));
16935 /* We use V1TI mode as a special container to hold __int128_t items that
16936 must live in VSX registers. */
16937 if (intTI_type_node)
16939 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16940 intTI_type_node, 1);
16941 unsigned_V1TI_type_node
16942 = rs6000_vector_type ("__vector unsigned __int128",
16943 unsigned_intTI_type_node, 1);
16946 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16947 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16948 'vector unsigned short'. */
16950 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16951 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16952 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16953 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16954 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16956 long_integer_type_internal_node = long_integer_type_node;
16957 long_unsigned_type_internal_node = long_unsigned_type_node;
16958 long_long_integer_type_internal_node = long_long_integer_type_node;
16959 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16960 intQI_type_internal_node = intQI_type_node;
16961 uintQI_type_internal_node = unsigned_intQI_type_node;
16962 intHI_type_internal_node = intHI_type_node;
16963 uintHI_type_internal_node = unsigned_intHI_type_node;
16964 intSI_type_internal_node = intSI_type_node;
16965 uintSI_type_internal_node = unsigned_intSI_type_node;
16966 intDI_type_internal_node = intDI_type_node;
16967 uintDI_type_internal_node = unsigned_intDI_type_node;
16968 intTI_type_internal_node = intTI_type_node;
16969 uintTI_type_internal_node = unsigned_intTI_type_node;
16970 float_type_internal_node = float_type_node;
16971 double_type_internal_node = double_type_node;
16972 long_double_type_internal_node = long_double_type_node;
16973 dfloat64_type_internal_node = dfloat64_type_node;
16974 dfloat128_type_internal_node = dfloat128_type_node;
16975 void_type_internal_node = void_type_node;
16977 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16978 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16979 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16980 format that uses a pair of doubles, depending on the switches and
16981 defaults.
16983 If we don't support either 128-bit IBM double double or IEEE 128-bit
16984 floating point, we need to make sure the type is non-zero or else the
16985 self-test fails during bootstrap.
16987 We don't register a built-in type for __ibm128 if the type is the same as
16988 long double. Instead, rs6000_cpu_cpp_builtins adds a #define mapping
16989 __ibm128 to long double.
16991 For IEEE 128-bit floating point, always create the type __ieee128. If the
16992 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16993 __ieee128. */
16994 if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
16996 ibm128_float_type_node = make_node (REAL_TYPE);
16997 TYPE_PRECISION (ibm128_float_type_node) = 128;
16998 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16999 layout_type (ibm128_float_type_node);
17001 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
17002 "__ibm128");
17004 else
17005 ibm128_float_type_node = long_double_type_node;
17007 if (TARGET_FLOAT128_TYPE)
17009 ieee128_float_type_node = float128_type_node;
17010 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
17011 "__ieee128");
17014 else
17015 ieee128_float_type_node = long_double_type_node;
17017 /* Initialize the modes for builtin_function_type, mapping a machine mode to
17018 its tree type node. */
17019 builtin_mode_to_type[QImode][0] = integer_type_node;
17020 builtin_mode_to_type[HImode][0] = integer_type_node;
17021 builtin_mode_to_type[SImode][0] = intSI_type_node;
17022 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
17023 builtin_mode_to_type[DImode][0] = intDI_type_node;
17024 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
17025 builtin_mode_to_type[TImode][0] = intTI_type_node;
17026 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
17027 builtin_mode_to_type[SFmode][0] = float_type_node;
17028 builtin_mode_to_type[DFmode][0] = double_type_node;
17029 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
17030 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
17031 builtin_mode_to_type[TFmode][0] = long_double_type_node;
17032 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
17033 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
17034 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
17035 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
17036 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
17037 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
17038 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
17039 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
17040 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
17041 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
17042 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
17043 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
17044 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
17045 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
17046 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
17047 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
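/* With the table filled in, builtin_function_type can map a
   (machine mode, unsignedness) pair directly to a type; e.g.
   builtin_mode_to_type[V4SImode][1] yields unsigned_V4SI_type_node,
   the "__vector unsigned int" type registered above.  */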
17049 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
17050 TYPE_NAME (bool_char_type_node) = tdecl;
17052 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
17053 TYPE_NAME (bool_short_type_node) = tdecl;
17055 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
17056 TYPE_NAME (bool_int_type_node) = tdecl;
17058 tdecl = add_builtin_type ("__pixel", pixel_type_node);
17059 TYPE_NAME (pixel_type_node) = tdecl;
17061 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
17062 bool_char_type_node, 16);
17063 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
17064 bool_short_type_node, 8);
17065 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
17066 bool_int_type_node, 4);
17067 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
17068 ? "__vector __bool long"
17069 : "__vector __bool long long",
17070 bool_long_type_node, 2);
17071 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
17072 pixel_type_node, 8);
17074 /* Paired builtins are only available if the compiler was configured with
17075 the appropriate options, so only create those builtins when the matching
17076 option is enabled. Create Altivec and VSX builtins on machines with at
17077 least the general purpose extensions (970 and newer) to allow the use of
17078 the target attribute. */
17079 if (TARGET_PAIRED_FLOAT)
17080 paired_init_builtins ();
17081 if (TARGET_EXTRA_BUILTINS)
17082 altivec_init_builtins ();
17083 if (TARGET_HTM)
17084 htm_init_builtins ();
17086 if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
17087 rs6000_common_init_builtins ();
17089 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
17090 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
17091 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
17093 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
17094 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
17095 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
17097 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
17098 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
17099 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
17101 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
17102 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
17103 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
17105 mode = (TARGET_64BIT) ? DImode : SImode;
17106 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
17107 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
17108 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
17110 ftype = build_function_type_list (unsigned_intDI_type_node,
17111 NULL_TREE);
17112 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
17114 if (TARGET_64BIT)
17115 ftype = build_function_type_list (unsigned_intDI_type_node,
17116 NULL_TREE);
17117 else
17118 ftype = build_function_type_list (unsigned_intSI_type_node,
17119 NULL_TREE);
17120 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
17122 ftype = build_function_type_list (double_type_node, NULL_TREE);
17123 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
17125 ftype = build_function_type_list (void_type_node,
17126 intSI_type_node, double_type_node,
17127 NULL_TREE);
17128 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
17130 ftype = build_function_type_list (void_type_node, NULL_TREE);
17131 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
17133 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
17134 NULL_TREE);
17135 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
17136 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
17138 /* AIX libm provides clog as __clog. */
17139 if (TARGET_XCOFF
17140 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
17141 set_user_assembler_name (tdecl, "__clog");
17143 #ifdef SUBTARGET_INIT_BUILTINS
17144 SUBTARGET_INIT_BUILTINS;
17145 #endif
17148 /* Returns the rs6000 builtin decl for CODE. */
17150 static tree
17151 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
17153 HOST_WIDE_INT fnmask;
17155 if (code >= RS6000_BUILTIN_COUNT)
17156 return error_mark_node;
17158 fnmask = rs6000_builtin_info[code].mask;
17159 if ((fnmask & rs6000_builtin_mask) != fnmask)
17161 rs6000_invalid_builtin ((enum rs6000_builtins)code);
17162 return error_mark_node;
17165 return rs6000_builtin_decls[code];
17168 static void
17169 paired_init_builtins (void)
17171 const struct builtin_description *d;
17172 size_t i;
17173 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17175 tree int_ftype_int_v2sf_v2sf
17176 = build_function_type_list (integer_type_node,
17177 integer_type_node,
17178 V2SF_type_node,
17179 V2SF_type_node,
17180 NULL_TREE);
17181 tree pcfloat_type_node =
17182 build_pointer_type (build_qualified_type
17183 (float_type_node, TYPE_QUAL_CONST));
17185 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
17186 long_integer_type_node,
17187 pcfloat_type_node,
17188 NULL_TREE);
17189 tree void_ftype_v2sf_long_pcfloat =
17190 build_function_type_list (void_type_node,
17191 V2SF_type_node,
17192 long_integer_type_node,
17193 pcfloat_type_node,
17194 NULL_TREE);
17197 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
17198 PAIRED_BUILTIN_LX);
17201 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
17202 PAIRED_BUILTIN_STX);
17204 /* Predicates. */
17205 d = bdesc_paired_preds;
17206 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
17208 tree type;
17209 HOST_WIDE_INT mask = d->mask;
17211 if ((mask & builtin_mask) != mask)
17213 if (TARGET_DEBUG_BUILTIN)
17214 fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
17215 d->name);
17216 continue;
17219 /* Cannot define builtin if the instruction is disabled. */
17220 gcc_assert (d->icode != CODE_FOR_nothing);
17222 if (TARGET_DEBUG_BUILTIN)
17223 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
17224 (int)i, get_insn_name (d->icode), (int)d->icode,
17225 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
17227 switch (insn_data[d->icode].operand[1].mode)
17229 case E_V2SFmode:
17230 type = int_ftype_int_v2sf_v2sf;
17231 break;
17232 default:
17233 gcc_unreachable ();
17236 def_builtin (d->name, type, d->code);
17240 static void
17241 altivec_init_builtins (void)
17243 const struct builtin_description *d;
17244 size_t i;
17245 tree ftype;
17246 tree decl;
17247 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17249 tree pvoid_type_node = build_pointer_type (void_type_node);
17251 tree pcvoid_type_node
17252 = build_pointer_type (build_qualified_type (void_type_node,
17253 TYPE_QUAL_CONST));
17255 tree int_ftype_opaque
17256 = build_function_type_list (integer_type_node,
17257 opaque_V4SI_type_node, NULL_TREE);
17258 tree opaque_ftype_opaque
17259 = build_function_type_list (integer_type_node, NULL_TREE);
17260 tree opaque_ftype_opaque_int
17261 = build_function_type_list (opaque_V4SI_type_node,
17262 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
17263 tree opaque_ftype_opaque_opaque_int
17264 = build_function_type_list (opaque_V4SI_type_node,
17265 opaque_V4SI_type_node, opaque_V4SI_type_node,
17266 integer_type_node, NULL_TREE);
17267 tree opaque_ftype_opaque_opaque_opaque
17268 = build_function_type_list (opaque_V4SI_type_node,
17269 opaque_V4SI_type_node, opaque_V4SI_type_node,
17270 opaque_V4SI_type_node, NULL_TREE);
17271 tree opaque_ftype_opaque_opaque
17272 = build_function_type_list (opaque_V4SI_type_node,
17273 opaque_V4SI_type_node, opaque_V4SI_type_node,
17274 NULL_TREE);
17275 tree int_ftype_int_opaque_opaque
17276 = build_function_type_list (integer_type_node,
17277 integer_type_node, opaque_V4SI_type_node,
17278 opaque_V4SI_type_node, NULL_TREE);
17279 tree int_ftype_int_v4si_v4si
17280 = build_function_type_list (integer_type_node,
17281 integer_type_node, V4SI_type_node,
17282 V4SI_type_node, NULL_TREE);
17283 tree int_ftype_int_v2di_v2di
17284 = build_function_type_list (integer_type_node,
17285 integer_type_node, V2DI_type_node,
17286 V2DI_type_node, NULL_TREE);
17287 tree void_ftype_v4si
17288 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
17289 tree v8hi_ftype_void
17290 = build_function_type_list (V8HI_type_node, NULL_TREE);
17291 tree void_ftype_void
17292 = build_function_type_list (void_type_node, NULL_TREE);
17293 tree void_ftype_int
17294 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
17296 tree opaque_ftype_long_pcvoid
17297 = build_function_type_list (opaque_V4SI_type_node,
17298 long_integer_type_node, pcvoid_type_node,
17299 NULL_TREE);
17300 tree v16qi_ftype_long_pcvoid
17301 = build_function_type_list (V16QI_type_node,
17302 long_integer_type_node, pcvoid_type_node,
17303 NULL_TREE);
17304 tree v8hi_ftype_long_pcvoid
17305 = build_function_type_list (V8HI_type_node,
17306 long_integer_type_node, pcvoid_type_node,
17307 NULL_TREE);
17308 tree v4si_ftype_long_pcvoid
17309 = build_function_type_list (V4SI_type_node,
17310 long_integer_type_node, pcvoid_type_node,
17311 NULL_TREE);
17312 tree v4sf_ftype_long_pcvoid
17313 = build_function_type_list (V4SF_type_node,
17314 long_integer_type_node, pcvoid_type_node,
17315 NULL_TREE);
17316 tree v2df_ftype_long_pcvoid
17317 = build_function_type_list (V2DF_type_node,
17318 long_integer_type_node, pcvoid_type_node,
17319 NULL_TREE);
17320 tree v2di_ftype_long_pcvoid
17321 = build_function_type_list (V2DI_type_node,
17322 long_integer_type_node, pcvoid_type_node,
17323 NULL_TREE);
17325 tree void_ftype_opaque_long_pvoid
17326 = build_function_type_list (void_type_node,
17327 opaque_V4SI_type_node, long_integer_type_node,
17328 pvoid_type_node, NULL_TREE);
17329 tree void_ftype_v4si_long_pvoid
17330 = build_function_type_list (void_type_node,
17331 V4SI_type_node, long_integer_type_node,
17332 pvoid_type_node, NULL_TREE);
17333 tree void_ftype_v16qi_long_pvoid
17334 = build_function_type_list (void_type_node,
17335 V16QI_type_node, long_integer_type_node,
17336 pvoid_type_node, NULL_TREE);
17338 tree void_ftype_v16qi_pvoid_long
17339 = build_function_type_list (void_type_node,
17340 V16QI_type_node, pvoid_type_node,
17341 long_integer_type_node, NULL_TREE);
17343 tree void_ftype_v8hi_long_pvoid
17344 = build_function_type_list (void_type_node,
17345 V8HI_type_node, long_integer_type_node,
17346 pvoid_type_node, NULL_TREE);
17347 tree void_ftype_v4sf_long_pvoid
17348 = build_function_type_list (void_type_node,
17349 V4SF_type_node, long_integer_type_node,
17350 pvoid_type_node, NULL_TREE);
17351 tree void_ftype_v2df_long_pvoid
17352 = build_function_type_list (void_type_node,
17353 V2DF_type_node, long_integer_type_node,
17354 pvoid_type_node, NULL_TREE);
17355 tree void_ftype_v2di_long_pvoid
17356 = build_function_type_list (void_type_node,
17357 V2DI_type_node, long_integer_type_node,
17358 pvoid_type_node, NULL_TREE);
17359 tree int_ftype_int_v8hi_v8hi
17360 = build_function_type_list (integer_type_node,
17361 integer_type_node, V8HI_type_node,
17362 V8HI_type_node, NULL_TREE);
17363 tree int_ftype_int_v16qi_v16qi
17364 = build_function_type_list (integer_type_node,
17365 integer_type_node, V16QI_type_node,
17366 V16QI_type_node, NULL_TREE);
17367 tree int_ftype_int_v4sf_v4sf
17368 = build_function_type_list (integer_type_node,
17369 integer_type_node, V4SF_type_node,
17370 V4SF_type_node, NULL_TREE);
17371 tree int_ftype_int_v2df_v2df
17372 = build_function_type_list (integer_type_node,
17373 integer_type_node, V2DF_type_node,
17374 V2DF_type_node, NULL_TREE);
17375 tree v2di_ftype_v2di
17376 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17377 tree v4si_ftype_v4si
17378 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17379 tree v8hi_ftype_v8hi
17380 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17381 tree v16qi_ftype_v16qi
17382 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17383 tree v4sf_ftype_v4sf
17384 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17385 tree v2df_ftype_v2df
17386 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17387 tree void_ftype_pcvoid_int_int
17388 = build_function_type_list (void_type_node,
17389 pcvoid_type_node, integer_type_node,
17390 integer_type_node, NULL_TREE);
17392 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17393 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17394 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17395 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17396 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17397 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17398 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17399 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17400 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17401 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17402 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17403 ALTIVEC_BUILTIN_LVXL_V2DF);
17404 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17405 ALTIVEC_BUILTIN_LVXL_V2DI);
17406 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17407 ALTIVEC_BUILTIN_LVXL_V4SF);
17408 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17409 ALTIVEC_BUILTIN_LVXL_V4SI);
17410 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17411 ALTIVEC_BUILTIN_LVXL_V8HI);
17412 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17413 ALTIVEC_BUILTIN_LVXL_V16QI);
17414 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17415 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17416 ALTIVEC_BUILTIN_LVX_V2DF);
17417 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17418 ALTIVEC_BUILTIN_LVX_V2DI);
17419 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17420 ALTIVEC_BUILTIN_LVX_V4SF);
17421 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17422 ALTIVEC_BUILTIN_LVX_V4SI);
17423 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17424 ALTIVEC_BUILTIN_LVX_V8HI);
17425 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17426 ALTIVEC_BUILTIN_LVX_V16QI);
17427 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17428 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17429 ALTIVEC_BUILTIN_STVX_V2DF);
17430 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17431 ALTIVEC_BUILTIN_STVX_V2DI);
17432 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17433 ALTIVEC_BUILTIN_STVX_V4SF);
17434 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17435 ALTIVEC_BUILTIN_STVX_V4SI);
17436 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17437 ALTIVEC_BUILTIN_STVX_V8HI);
17438 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17439 ALTIVEC_BUILTIN_STVX_V16QI);
17440 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17441 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17442 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17443 ALTIVEC_BUILTIN_STVXL_V2DF);
17444 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17445 ALTIVEC_BUILTIN_STVXL_V2DI);
17446 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17447 ALTIVEC_BUILTIN_STVXL_V4SF);
17448 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17449 ALTIVEC_BUILTIN_STVXL_V4SI);
17450 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17451 ALTIVEC_BUILTIN_STVXL_V8HI);
17452 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17453 ALTIVEC_BUILTIN_STVXL_V16QI);
17454 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17455 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17456 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17457 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17458 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17459 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17460 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17461 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17462 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17463 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17464 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17465 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17466 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17467 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17468 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17469 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17471 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17472 VSX_BUILTIN_LXVD2X_V2DF);
17473 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17474 VSX_BUILTIN_LXVD2X_V2DI);
17475 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17476 VSX_BUILTIN_LXVW4X_V4SF);
17477 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17478 VSX_BUILTIN_LXVW4X_V4SI);
17479 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17480 VSX_BUILTIN_LXVW4X_V8HI);
17481 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17482 VSX_BUILTIN_LXVW4X_V16QI);
17483 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17484 VSX_BUILTIN_STXVD2X_V2DF);
17485 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17486 VSX_BUILTIN_STXVD2X_V2DI);
17487 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17488 VSX_BUILTIN_STXVW4X_V4SF);
17489 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17490 VSX_BUILTIN_STXVW4X_V4SI);
17491 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17492 VSX_BUILTIN_STXVW4X_V8HI);
17493 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17494 VSX_BUILTIN_STXVW4X_V16QI);
17496 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17497 VSX_BUILTIN_LD_ELEMREV_V2DF);
17498 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17499 VSX_BUILTIN_LD_ELEMREV_V2DI);
17500 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17501 VSX_BUILTIN_LD_ELEMREV_V4SF);
17502 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17503 VSX_BUILTIN_LD_ELEMREV_V4SI);
17504 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17505 VSX_BUILTIN_LD_ELEMREV_V8HI);
17506 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17507 VSX_BUILTIN_LD_ELEMREV_V16QI);
17508 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17509 VSX_BUILTIN_ST_ELEMREV_V2DF);
17510 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17511 VSX_BUILTIN_ST_ELEMREV_V2DI);
17512 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17513 VSX_BUILTIN_ST_ELEMREV_V4SF);
17514 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17515 VSX_BUILTIN_ST_ELEMREV_V4SI);
17516 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17517 VSX_BUILTIN_ST_ELEMREV_V8HI);
17518 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17519 VSX_BUILTIN_ST_ELEMREV_V16QI);
17521 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17522 VSX_BUILTIN_VEC_LD);
17523 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17524 VSX_BUILTIN_VEC_ST);
17525 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17526 VSX_BUILTIN_VEC_XL);
17527 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17528 VSX_BUILTIN_VEC_XL_BE);
17529 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17530 VSX_BUILTIN_VEC_XST);
17531 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17532 VSX_BUILTIN_VEC_XST_BE);
17534 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17535 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17536 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17538 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17539 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17540 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17541 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17542 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17543 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17544 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17545 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17546 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17547 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17548 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17549 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17551 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17552 ALTIVEC_BUILTIN_VEC_ADDE);
17553 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17554 ALTIVEC_BUILTIN_VEC_ADDEC);
17555 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17556 ALTIVEC_BUILTIN_VEC_CMPNE);
17557 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17558 ALTIVEC_BUILTIN_VEC_MUL);
17559 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17560 ALTIVEC_BUILTIN_VEC_SUBE);
17561 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17562 ALTIVEC_BUILTIN_VEC_SUBEC);
17564 /* Cell builtins. */
17565 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17566 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17567 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17568 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17570 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17571 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17572 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17573 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17575 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17576 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17577 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17578 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17580 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17581 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17582 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17583 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17585 if (TARGET_P9_VECTOR)
17587 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17588 P9V_BUILTIN_STXVL);
17589 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17590 P9V_BUILTIN_XST_LEN_R);
17593 /* Add the DST variants. */
17594 d = bdesc_dst;
17595 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17597 HOST_WIDE_INT mask = d->mask;
17599 /* It is expected that these dst built-in functions may have
17600 d->icode equal to CODE_FOR_nothing. */
17601 if ((mask & builtin_mask) != mask)
17603 if (TARGET_DEBUG_BUILTIN)
17604 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17605 d->name);
17606 continue;
17608 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17611 /* Initialize the predicates. */
17612 d = bdesc_altivec_preds;
17613 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17615 machine_mode mode1;
17616 tree type;
17617 HOST_WIDE_INT mask = d->mask;
17619 if ((mask & builtin_mask) != mask)
17621 if (TARGET_DEBUG_BUILTIN)
17622 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17623 d->name);
17624 continue;
17627 if (rs6000_overloaded_builtin_p (d->code))
17628 mode1 = VOIDmode;
17629 else
17631 /* Cannot define builtin if the instruction is disabled. */
17632 gcc_assert (d->icode != CODE_FOR_nothing);
17633 mode1 = insn_data[d->icode].operand[1].mode;
17636 switch (mode1)
17638 case E_VOIDmode:
17639 type = int_ftype_int_opaque_opaque;
17640 break;
17641 case E_V2DImode:
17642 type = int_ftype_int_v2di_v2di;
17643 break;
17644 case E_V4SImode:
17645 type = int_ftype_int_v4si_v4si;
17646 break;
17647 case E_V8HImode:
17648 type = int_ftype_int_v8hi_v8hi;
17649 break;
17650 case E_V16QImode:
17651 type = int_ftype_int_v16qi_v16qi;
17652 break;
17653 case E_V4SFmode:
17654 type = int_ftype_int_v4sf_v4sf;
17655 break;
17656 case E_V2DFmode:
17657 type = int_ftype_int_v2df_v2df;
17658 break;
17659 default:
17660 gcc_unreachable ();
17663 def_builtin (d->name, type, d->code);
17666 /* Initialize the abs* operators. */
17667 d = bdesc_abs;
17668 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17670 machine_mode mode0;
17671 tree type;
17672 HOST_WIDE_INT mask = d->mask;
17674 if ((mask & builtin_mask) != mask)
17676 if (TARGET_DEBUG_BUILTIN)
17677 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17678 d->name);
17679 continue;
17682 /* Cannot define builtin if the instruction is disabled. */
17683 gcc_assert (d->icode != CODE_FOR_nothing);
17684 mode0 = insn_data[d->icode].operand[0].mode;
17686 switch (mode0)
17688 case E_V2DImode:
17689 type = v2di_ftype_v2di;
17690 break;
17691 case E_V4SImode:
17692 type = v4si_ftype_v4si;
17693 break;
17694 case E_V8HImode:
17695 type = v8hi_ftype_v8hi;
17696 break;
17697 case E_V16QImode:
17698 type = v16qi_ftype_v16qi;
17699 break;
17700 case E_V4SFmode:
17701 type = v4sf_ftype_v4sf;
17702 break;
17703 case E_V2DFmode:
17704 type = v2df_ftype_v2df;
17705 break;
17706 default:
17707 gcc_unreachable ();
17710 def_builtin (d->name, type, d->code);
17713 /* Initialize target builtin that implements
17714 targetm.vectorize.builtin_mask_for_load. */
17716 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17717 v16qi_ftype_long_pcvoid,
17718 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17719 BUILT_IN_MD, NULL, NULL_TREE);
17720 TREE_READONLY (decl) = 1;
17721 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17722 altivec_builtin_mask_for_load = decl;
17724 /* Access to the vec_init patterns. */
17725 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17726 integer_type_node, integer_type_node,
17727 integer_type_node, NULL_TREE);
17728 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17730 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17731 short_integer_type_node,
17732 short_integer_type_node,
17733 short_integer_type_node,
17734 short_integer_type_node,
17735 short_integer_type_node,
17736 short_integer_type_node,
17737 short_integer_type_node, NULL_TREE);
17738 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17740 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17741 char_type_node, char_type_node,
17742 char_type_node, char_type_node,
17743 char_type_node, char_type_node,
17744 char_type_node, char_type_node,
17745 char_type_node, char_type_node,
17746 char_type_node, char_type_node,
17747 char_type_node, char_type_node,
17748 char_type_node, NULL_TREE);
17749 def_builtin ("__builtin_vec_init_v16qi", ftype,
17750 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17752 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17753 float_type_node, float_type_node,
17754 float_type_node, NULL_TREE);
17755 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17757 /* VSX builtins. */
17758 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17759 double_type_node, NULL_TREE);
17760 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17762 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17763 intDI_type_node, NULL_TREE);
17764 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17766 /* Access to the vec_set patterns. */
17767 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17768 intSI_type_node,
17769 integer_type_node, NULL_TREE);
17770 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17772 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17773 intHI_type_node,
17774 integer_type_node, NULL_TREE);
17775 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17777 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17778 intQI_type_node,
17779 integer_type_node, NULL_TREE);
17780 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17782 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17783 float_type_node,
17784 integer_type_node, NULL_TREE);
17785 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17787 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17788 double_type_node,
17789 integer_type_node, NULL_TREE);
17790 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17792 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17793 intDI_type_node,
17794 integer_type_node, NULL_TREE);
17795 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17797 /* Access to the vec_extract patterns. */
17798 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17799 integer_type_node, NULL_TREE);
17800 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17802 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17803 integer_type_node, NULL_TREE);
17804 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17806 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17807 integer_type_node, NULL_TREE);
17808 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17810 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17811 integer_type_node, NULL_TREE);
17812 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17814 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17815 integer_type_node, NULL_TREE);
17816 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17818 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17819 integer_type_node, NULL_TREE);
17820 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17823 if (V1TI_type_node)
17825 tree v1ti_ftype_long_pcvoid
17826 = build_function_type_list (V1TI_type_node,
17827 long_integer_type_node, pcvoid_type_node,
17828 NULL_TREE);
17829 tree void_ftype_v1ti_long_pvoid
17830 = build_function_type_list (void_type_node,
17831 V1TI_type_node, long_integer_type_node,
17832 pvoid_type_node, NULL_TREE);
17833 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17834 VSX_BUILTIN_LXVD2X_V1TI);
17835 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17836 VSX_BUILTIN_STXVD2X_V1TI);
17837 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17838 NULL_TREE, NULL_TREE);
17839 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17840 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17841 intTI_type_node,
17842 integer_type_node, NULL_TREE);
17843 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17844 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17845 integer_type_node, NULL_TREE);
17846 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17851 static void
17852 htm_init_builtins (void)
17854 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17855 const struct builtin_description *d;
17856 size_t i;
17858 d = bdesc_htm;
17859 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17861 tree op[MAX_HTM_OPERANDS], type;
17862 HOST_WIDE_INT mask = d->mask;
17863 unsigned attr = rs6000_builtin_info[d->code].attr;
17864 bool void_func = (attr & RS6000_BTC_VOID);
17865 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17866 int nopnds = 0;
17867 tree gpr_type_node;
17868 tree rettype;
17869 tree argtype;
17871 /* It is expected that these htm built-in functions may have
17872 d->icode equal to CODE_FOR_nothing. */
17874 if (TARGET_32BIT && TARGET_POWERPC64)
17875 gpr_type_node = long_long_unsigned_type_node;
17876 else
17877 gpr_type_node = long_unsigned_type_node;
17879 if (attr & RS6000_BTC_SPR)
17881 rettype = gpr_type_node;
17882 argtype = gpr_type_node;
17884 else if (d->code == HTM_BUILTIN_TABORTDC
17885 || d->code == HTM_BUILTIN_TABORTDCI)
17887 rettype = unsigned_type_node;
17888 argtype = gpr_type_node;
17890 else
17892 rettype = unsigned_type_node;
17893 argtype = unsigned_type_node;
17896 if ((mask & builtin_mask) != mask)
17898 if (TARGET_DEBUG_BUILTIN)
17899 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
17900 continue;
17903 if (d->name == 0)
17905 if (TARGET_DEBUG_BUILTIN)
17906 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17907 (long unsigned) i);
17908 continue;
17911 op[nopnds++] = (void_func) ? void_type_node : rettype;
17913 if (attr_args == RS6000_BTC_UNARY)
17914 op[nopnds++] = argtype;
17915 else if (attr_args == RS6000_BTC_BINARY)
17917 op[nopnds++] = argtype;
17918 op[nopnds++] = argtype;
17920 else if (attr_args == RS6000_BTC_TERNARY)
17922 op[nopnds++] = argtype;
17923 op[nopnds++] = argtype;
17924 op[nopnds++] = argtype;
17927 switch (nopnds)
17929 case 1:
17930 type = build_function_type_list (op[0], NULL_TREE);
17931 break;
17932 case 2:
17933 type = build_function_type_list (op[0], op[1], NULL_TREE);
17934 break;
17935 case 3:
17936 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17937 break;
17938 case 4:
17939 type = build_function_type_list (op[0], op[1], op[2], op[3],
17940 NULL_TREE);
17941 break;
17942 default:
17943 gcc_unreachable ();
17946 def_builtin (d->name, type, d->code);
17950 /* Hash function for builtin functions with up to 3 arguments and a return
17951 type. */
17952 hashval_t
17953 builtin_hasher::hash (builtin_hash_struct *bh)
17955 unsigned ret = 0;
17956 int i;
17958 for (i = 0; i < 4; i++)
17960 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17961 ret = (ret * 2) + bh->uns_p[i];
17964 return ret;
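/* The loop above folds the four (mode, uns_p) pairs into a single value,
   treating them as digits in a mixed radix; for each i it computes

       ret = (ret * MAX_MACHINE_MODE + bh->mode[i]) * 2 + bh->uns_p[i];

   so two builtin signatures hash equal only when every mode and every
   signedness flag agree, which builtin_hasher::equal below then verifies
   exactly.  */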
17967 /* Compare builtin hash entries H1 and H2 for equivalence. */
17968 bool
17969 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17971 return ((p1->mode[0] == p2->mode[0])
17972 && (p1->mode[1] == p2->mode[1])
17973 && (p1->mode[2] == p2->mode[2])
17974 && (p1->mode[3] == p2->mode[3])
17975 && (p1->uns_p[0] == p2->uns_p[0])
17976 && (p1->uns_p[1] == p2->uns_p[1])
17977 && (p1->uns_p[2] == p2->uns_p[2])
17978 && (p1->uns_p[3] == p2->uns_p[3]));
17981 /* Map types for builtin functions with an explicit return type and up to 3
17982 arguments. Functions with fewer than 3 arguments use VOIDmode as the mode
17983 of the unused arguments. */
17984 static tree
17985 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17986 machine_mode mode_arg1, machine_mode mode_arg2,
17987 enum rs6000_builtins builtin, const char *name)
17989 struct builtin_hash_struct h;
17990 struct builtin_hash_struct *h2;
17991 int num_args = 3;
17992 int i;
17993 tree ret_type = NULL_TREE;
17994 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17996 /* Create builtin_hash_table. */
17997 if (builtin_hash_table == NULL)
17998 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
18000 h.type = NULL_TREE;
18001 h.mode[0] = mode_ret;
18002 h.mode[1] = mode_arg0;
18003 h.mode[2] = mode_arg1;
18004 h.mode[3] = mode_arg2;
18005 h.uns_p[0] = 0;
18006 h.uns_p[1] = 0;
18007 h.uns_p[2] = 0;
18008 h.uns_p[3] = 0;
18010 /* If the builtin produces unsigned results or takes unsigned arguments,
18011 and it is returned as a decl for the vectorizer (such as widening
18012 multiplies, permute), make sure the arguments and return value
18013 are type correct. */
18014 switch (builtin)
18016 /* unsigned 1 argument functions. */
18017 case CRYPTO_BUILTIN_VSBOX:
18018 case P8V_BUILTIN_VGBBD:
18019 case MISC_BUILTIN_CDTBCD:
18020 case MISC_BUILTIN_CBCDTD:
18021 h.uns_p[0] = 1;
18022 h.uns_p[1] = 1;
18023 break;
18025 /* unsigned 2 argument functions. */
18026 case ALTIVEC_BUILTIN_VMULEUB:
18027 case ALTIVEC_BUILTIN_VMULEUH:
18028 case ALTIVEC_BUILTIN_VMULEUW:
18029 case ALTIVEC_BUILTIN_VMULOUB:
18030 case ALTIVEC_BUILTIN_VMULOUH:
18031 case ALTIVEC_BUILTIN_VMULOUW:
18032 case CRYPTO_BUILTIN_VCIPHER:
18033 case CRYPTO_BUILTIN_VCIPHERLAST:
18034 case CRYPTO_BUILTIN_VNCIPHER:
18035 case CRYPTO_BUILTIN_VNCIPHERLAST:
18036 case CRYPTO_BUILTIN_VPMSUMB:
18037 case CRYPTO_BUILTIN_VPMSUMH:
18038 case CRYPTO_BUILTIN_VPMSUMW:
18039 case CRYPTO_BUILTIN_VPMSUMD:
18040 case CRYPTO_BUILTIN_VPMSUM:
18041 case MISC_BUILTIN_ADDG6S:
18042 case MISC_BUILTIN_DIVWEU:
18043 case MISC_BUILTIN_DIVWEUO:
18044 case MISC_BUILTIN_DIVDEU:
18045 case MISC_BUILTIN_DIVDEUO:
18046 case VSX_BUILTIN_UDIV_V2DI:
18047 case ALTIVEC_BUILTIN_VMAXUB:
18048 case ALTIVEC_BUILTIN_VMINUB:
18049 case ALTIVEC_BUILTIN_VMAXUH:
18050 case ALTIVEC_BUILTIN_VMINUH:
18051 case ALTIVEC_BUILTIN_VMAXUW:
18052 case ALTIVEC_BUILTIN_VMINUW:
18053 case P8V_BUILTIN_VMAXUD:
18054 case P8V_BUILTIN_VMINUD:
18055 h.uns_p[0] = 1;
18056 h.uns_p[1] = 1;
18057 h.uns_p[2] = 1;
18058 break;
18060 /* unsigned 3 argument functions. */
18061 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
18062 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
18063 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
18064 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
18065 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
18066 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
18067 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
18068 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
18069 case VSX_BUILTIN_VPERM_16QI_UNS:
18070 case VSX_BUILTIN_VPERM_8HI_UNS:
18071 case VSX_BUILTIN_VPERM_4SI_UNS:
18072 case VSX_BUILTIN_VPERM_2DI_UNS:
18073 case VSX_BUILTIN_XXSEL_16QI_UNS:
18074 case VSX_BUILTIN_XXSEL_8HI_UNS:
18075 case VSX_BUILTIN_XXSEL_4SI_UNS:
18076 case VSX_BUILTIN_XXSEL_2DI_UNS:
18077 case CRYPTO_BUILTIN_VPERMXOR:
18078 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
18079 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
18080 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
18081 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
18082 case CRYPTO_BUILTIN_VSHASIGMAW:
18083 case CRYPTO_BUILTIN_VSHASIGMAD:
18084 case CRYPTO_BUILTIN_VSHASIGMA:
18085 h.uns_p[0] = 1;
18086 h.uns_p[1] = 1;
18087 h.uns_p[2] = 1;
18088 h.uns_p[3] = 1;
18089 break;
18091 /* signed permute functions with unsigned char mask. */
18092 case ALTIVEC_BUILTIN_VPERM_16QI:
18093 case ALTIVEC_BUILTIN_VPERM_8HI:
18094 case ALTIVEC_BUILTIN_VPERM_4SI:
18095 case ALTIVEC_BUILTIN_VPERM_4SF:
18096 case ALTIVEC_BUILTIN_VPERM_2DI:
18097 case ALTIVEC_BUILTIN_VPERM_2DF:
18098 case VSX_BUILTIN_VPERM_16QI:
18099 case VSX_BUILTIN_VPERM_8HI:
18100 case VSX_BUILTIN_VPERM_4SI:
18101 case VSX_BUILTIN_VPERM_4SF:
18102 case VSX_BUILTIN_VPERM_2DI:
18103 case VSX_BUILTIN_VPERM_2DF:
18104 h.uns_p[3] = 1;
18105 break;
18107 /* unsigned args, signed return. */
18108 case VSX_BUILTIN_XVCVUXDSP:
18109 case VSX_BUILTIN_XVCVUXDDP_UNS:
18110 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
18111 h.uns_p[1] = 1;
18112 break;
18114 /* signed args, unsigned return. */
18115 case VSX_BUILTIN_XVCVDPUXDS_UNS:
18116 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
18117 case MISC_BUILTIN_UNPACK_TD:
18118 case MISC_BUILTIN_UNPACK_V1TI:
18119 h.uns_p[0] = 1;
18120 break;
18122 /* unsigned arguments, bool return (compares). */
18123 case ALTIVEC_BUILTIN_VCMPEQUB:
18124 case ALTIVEC_BUILTIN_VCMPEQUH:
18125 case ALTIVEC_BUILTIN_VCMPEQUW:
18126 case P8V_BUILTIN_VCMPEQUD:
18127 case VSX_BUILTIN_CMPGE_U16QI:
18128 case VSX_BUILTIN_CMPGE_U8HI:
18129 case VSX_BUILTIN_CMPGE_U4SI:
18130 case VSX_BUILTIN_CMPGE_U2DI:
18131 case ALTIVEC_BUILTIN_VCMPGTUB:
18132 case ALTIVEC_BUILTIN_VCMPGTUH:
18133 case ALTIVEC_BUILTIN_VCMPGTUW:
18134 case P8V_BUILTIN_VCMPGTUD:
18135 h.uns_p[1] = 1;
18136 h.uns_p[2] = 1;
18137 break;
18139 /* unsigned arguments for 128-bit pack instructions. */
18140 case MISC_BUILTIN_PACK_TD:
18141 case MISC_BUILTIN_PACK_V1TI:
18142 h.uns_p[1] = 1;
18143 h.uns_p[2] = 1;
18144 break;
18146 /* unsigned second arguments (vector shift right). */
18147 case ALTIVEC_BUILTIN_VSRB:
18148 case ALTIVEC_BUILTIN_VSRH:
18149 case ALTIVEC_BUILTIN_VSRW:
18150 case P8V_BUILTIN_VSRD:
18151 h.uns_p[2] = 1;
18152 break;
18154 default:
18155 break;
18158 /* Figure out how many args are present. */
18159 while (num_args > 0 && h.mode[num_args] == VOIDmode)
18160 num_args--;
18162 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
18163 if (!ret_type && h.uns_p[0])
18164 ret_type = builtin_mode_to_type[h.mode[0]][0];
18166 if (!ret_type)
18167 fatal_error (input_location,
18168 "internal error: builtin function %qs had an unexpected "
18169 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
18171 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
18172 arg_type[i] = NULL_TREE;
18174 for (i = 0; i < num_args; i++)
18176 int m = (int) h.mode[i+1];
18177 int uns_p = h.uns_p[i+1];
18179 arg_type[i] = builtin_mode_to_type[m][uns_p];
18180 if (!arg_type[i] && uns_p)
18181 arg_type[i] = builtin_mode_to_type[m][0];
18183 if (!arg_type[i])
18184 fatal_error (input_location,
18185 "internal error: builtin function %qs, argument %d "
18186 "had unexpected argument type %qs", name, i,
18187 GET_MODE_NAME (m));
18190 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
18191 if (*found == NULL)
18193 h2 = ggc_alloc<builtin_hash_struct> ();
18194 *h2 = h;
18195 *found = h2;
18197 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
18198 arg_type[2], NULL_TREE);
18201 return (*found)->type;
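/* An illustrative call (the operand modes here are an assumption about
   what the vmuleub insn pattern supplies, not something stated in this
   file): for the vector multiply-even-unsigned-byte builtin,

       builtin_function_type (V8HImode, V16QImode, V16QImode, VOIDmode,
                              ALTIVEC_BUILTIN_VMULEUB,
                              "__builtin_altivec_vmuleub")

   lands in the "unsigned 2 argument functions" case above, sets
   uns_p[0..2], and caches a type equivalent to

       vector unsigned short (vector unsigned char, vector unsigned char).  */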
18204 static void
18205 rs6000_common_init_builtins (void)
18207 const struct builtin_description *d;
18208 size_t i;
18210 tree opaque_ftype_opaque = NULL_TREE;
18211 tree opaque_ftype_opaque_opaque = NULL_TREE;
18212 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
18213 tree v2si_ftype = NULL_TREE;
18214 tree v2si_ftype_qi = NULL_TREE;
18215 tree v2si_ftype_v2si_qi = NULL_TREE;
18216 tree v2si_ftype_int_qi = NULL_TREE;
18217 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18219 if (!TARGET_PAIRED_FLOAT)
18221 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
18222 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
18225 /* Paired builtins are only available if the compiler was built with the
18226 appropriate options, so only create those builtins when that is the case.
18227 Create Altivec and VSX builtins on machines with at least the general
18228 purpose extensions (970 and newer) to allow the use of the target
18229 attribute. */
18231 if (TARGET_EXTRA_BUILTINS)
18232 builtin_mask |= RS6000_BTM_COMMON;
18234 /* Add the ternary operators. */
18235 d = bdesc_3arg;
18236 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
18238 tree type;
18239 HOST_WIDE_INT mask = d->mask;
18241 if ((mask & builtin_mask) != mask)
18243 if (TARGET_DEBUG_BUILTIN)
18244 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
18245 continue;
18248 if (rs6000_overloaded_builtin_p (d->code))
18250 if (! (type = opaque_ftype_opaque_opaque_opaque))
18251 type = opaque_ftype_opaque_opaque_opaque
18252 = build_function_type_list (opaque_V4SI_type_node,
18253 opaque_V4SI_type_node,
18254 opaque_V4SI_type_node,
18255 opaque_V4SI_type_node,
18256 NULL_TREE);
18258 else
18260 enum insn_code icode = d->icode;
18261 if (d->name == 0)
18263 if (TARGET_DEBUG_BUILTIN)
18264 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
18265 (long unsigned) i);
18267 continue;
18270 if (icode == CODE_FOR_nothing)
18272 if (TARGET_DEBUG_BUILTIN)
18273 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
18274 d->name);
18276 continue;
18279 type = builtin_function_type (insn_data[icode].operand[0].mode,
18280 insn_data[icode].operand[1].mode,
18281 insn_data[icode].operand[2].mode,
18282 insn_data[icode].operand[3].mode,
18283 d->code, d->name);
18286 def_builtin (d->name, type, d->code);
18289 /* Add the binary operators. */
18290 d = bdesc_2arg;
18291 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
18293 machine_mode mode0, mode1, mode2;
18294 tree type;
18295 HOST_WIDE_INT mask = d->mask;
18297 if ((mask & builtin_mask) != mask)
18299 if (TARGET_DEBUG_BUILTIN)
18300 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
18301 continue;
18304 if (rs6000_overloaded_builtin_p (d->code))
18306 if (! (type = opaque_ftype_opaque_opaque))
18307 type = opaque_ftype_opaque_opaque
18308 = build_function_type_list (opaque_V4SI_type_node,
18309 opaque_V4SI_type_node,
18310 opaque_V4SI_type_node,
18311 NULL_TREE);
18313 else
18315 enum insn_code icode = d->icode;
18316 if (d->name == 0)
18318 if (TARGET_DEBUG_BUILTIN)
18319 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
18320 (long unsigned) i);
18322 continue;
18325 if (icode == CODE_FOR_nothing)
18327 if (TARGET_DEBUG_BUILTIN)
18328 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
18329 d->name);
18331 continue;
18334 mode0 = insn_data[icode].operand[0].mode;
18335 mode1 = insn_data[icode].operand[1].mode;
18336 mode2 = insn_data[icode].operand[2].mode;
18338 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
18340 if (! (type = v2si_ftype_v2si_qi))
18341 type = v2si_ftype_v2si_qi
18342 = build_function_type_list (opaque_V2SI_type_node,
18343 opaque_V2SI_type_node,
18344 char_type_node,
18345 NULL_TREE);
18348 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
18349 && mode2 == QImode)
18351 if (! (type = v2si_ftype_int_qi))
18352 type = v2si_ftype_int_qi
18353 = build_function_type_list (opaque_V2SI_type_node,
18354 integer_type_node,
18355 char_type_node,
18356 NULL_TREE);
18359 else
18360 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
18361 d->code, d->name);
18364 def_builtin (d->name, type, d->code);
18367 /* Add the simple unary operators. */
18368 d = bdesc_1arg;
18369 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18371 machine_mode mode0, mode1;
18372 tree type;
18373 HOST_WIDE_INT mask = d->mask;
18375 if ((mask & builtin_mask) != mask)
18377 if (TARGET_DEBUG_BUILTIN)
18378 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18379 continue;
18382 if (rs6000_overloaded_builtin_p (d->code))
18384 if (! (type = opaque_ftype_opaque))
18385 type = opaque_ftype_opaque
18386 = build_function_type_list (opaque_V4SI_type_node,
18387 opaque_V4SI_type_node,
18388 NULL_TREE);
18390 else
18392 enum insn_code icode = d->icode;
18393 if (d->name == 0)
18395 if (TARGET_DEBUG_BUILTIN)
18396 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
18397 (long unsigned) i);
18399 continue;
18402 if (icode == CODE_FOR_nothing)
18404 if (TARGET_DEBUG_BUILTIN)
18405 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18406 d->name);
18408 continue;
18411 mode0 = insn_data[icode].operand[0].mode;
18412 mode1 = insn_data[icode].operand[1].mode;
18414 if (mode0 == V2SImode && mode1 == QImode)
18416 if (! (type = v2si_ftype_qi))
18417 type = v2si_ftype_qi
18418 = build_function_type_list (opaque_V2SI_type_node,
18419 char_type_node,
18420 NULL_TREE);
18423 else
18424 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18425 d->code, d->name);
18428 def_builtin (d->name, type, d->code);
18431 /* Add the simple no-argument operators. */
18432 d = bdesc_0arg;
18433 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18435 machine_mode mode0;
18436 tree type;
18437 HOST_WIDE_INT mask = d->mask;
18439 if ((mask & builtin_mask) != mask)
18441 if (TARGET_DEBUG_BUILTIN)
18442 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18443 continue;
18445 if (rs6000_overloaded_builtin_p (d->code))
18447 if (!opaque_ftype_opaque)
18448 opaque_ftype_opaque
18449 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18450 type = opaque_ftype_opaque;
18452 else
18454 enum insn_code icode = d->icode;
18455 if (d->name == 0)
18457 if (TARGET_DEBUG_BUILTIN)
18458 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18459 (long unsigned) i);
18460 continue;
18462 if (icode == CODE_FOR_nothing)
18464 if (TARGET_DEBUG_BUILTIN)
18465 fprintf (stderr,
18466 "rs6000_builtin, skip no-argument %s (no code)\n",
18467 d->name);
18468 continue;
18470 mode0 = insn_data[icode].operand[0].mode;
18471 if (mode0 == V2SImode)
18473 /* Code for paired single. */
18474 if (! (type = v2si_ftype))
18476 v2si_ftype
18477 = build_function_type_list (opaque_V2SI_type_node,
18478 NULL_TREE);
18479 type = v2si_ftype;
18482 else
18483 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18484 d->code, d->name);
18486 def_builtin (d->name, type, d->code);
18490 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18491 static void
18492 init_float128_ibm (machine_mode mode)
18494 if (!TARGET_XL_COMPAT)
18496 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18497 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18498 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18499 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18501 if (!TARGET_HARD_FLOAT)
18503 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18504 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18505 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18506 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18507 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18508 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18509 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18510 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18512 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18513 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18514 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18515 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18516 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18517 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18518 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18519 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18522 else
18524 set_optab_libfunc (add_optab, mode, "_xlqadd");
18525 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18526 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18527 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18530 /* Add various conversions for IFmode to use the traditional TFmode
18531 names. */
18532 if (mode == IFmode)
18534 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
18535 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
18536 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
18537 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
18538 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
18539 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
18541 if (TARGET_POWERPC64)
18543 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18544 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18545 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18546 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18551 /* Set up IEEE 128-bit floating point routines. Use different names if the
18552 arguments can be passed in a vector register. The historical PowerPC
18553 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18554 continue to use that if we aren't using vector registers to pass IEEE
18555 128-bit floating point. */
18557 static void
18558 init_float128_ieee (machine_mode mode)
18560 if (FLOAT128_VECTOR_P (mode))
18562 set_optab_libfunc (add_optab, mode, "__addkf3");
18563 set_optab_libfunc (sub_optab, mode, "__subkf3");
18564 set_optab_libfunc (neg_optab, mode, "__negkf2");
18565 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18566 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18567 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18568 set_optab_libfunc (abs_optab, mode, "__abskf2");
18570 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18571 set_optab_libfunc (ne_optab, mode, "__nekf2");
18572 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18573 set_optab_libfunc (ge_optab, mode, "__gekf2");
18574 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18575 set_optab_libfunc (le_optab, mode, "__lekf2");
18576 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18578 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18579 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18580 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18581 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18583 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
18584 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18585 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
18587 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
18588 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18589 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
18591 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
18592 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
18593 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
18594 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
18595 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
18596 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
18598 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18599 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18600 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18601 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18603 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18604 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18605 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18606 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18608 if (TARGET_POWERPC64)
18610 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18611 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18612 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18613 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18617 else
18619 set_optab_libfunc (add_optab, mode, "_q_add");
18620 set_optab_libfunc (sub_optab, mode, "_q_sub");
18621 set_optab_libfunc (neg_optab, mode, "_q_neg");
18622 set_optab_libfunc (smul_optab, mode, "_q_mul");
18623 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18624 if (TARGET_PPC_GPOPT)
18625 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18627 set_optab_libfunc (eq_optab, mode, "_q_feq");
18628 set_optab_libfunc (ne_optab, mode, "_q_fne");
18629 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18630 set_optab_libfunc (ge_optab, mode, "_q_fge");
18631 set_optab_libfunc (lt_optab, mode, "_q_flt");
18632 set_optab_libfunc (le_optab, mode, "_q_fle");
18634 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18635 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18636 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18637 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18638 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18639 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18640 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18641 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
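/* Net effect: on targets where FLOAT128_VECTOR_P holds for KFmode, a
   __float128 addition such as a + b expands to a call to __addkf3, a
   comparison to __gtkf2 and friends; otherwise the historical _q_add
   style names are used.  */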
18645 static void
18646 rs6000_init_libfuncs (void)
18648 /* __float128 support. */
18649 if (TARGET_FLOAT128_TYPE)
18651 init_float128_ibm (IFmode);
18652 init_float128_ieee (KFmode);
18655 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18656 if (TARGET_LONG_DOUBLE_128)
18658 if (!TARGET_IEEEQUAD)
18659 init_float128_ibm (TFmode);
18661 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18662 else
18663 init_float128_ieee (TFmode);
18667 /* Emit a potentially record-form instruction, setting DST from SRC.
18668 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18669 signed comparison of DST with zero. If DOT is 1, the generated RTL
18670 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18671 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18672 a separate COMPARE. */
18674 void
18675 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18677 if (dot == 0)
18679 emit_move_insn (dst, src);
18680 return;
18683 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18685 emit_move_insn (dst, src);
18686 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18687 return;
18690 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18691 if (dot == 1)
18693 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18694 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18696 else
18698 rtx set = gen_rtx_SET (dst, src);
18699 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
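/* For reference, the two record-form shapes emitted above are

     dot == 1:  (parallel [(set CCREG (compare:CC SRC (const_int 0)))
                           (clobber DST)])
     dot == 2:  (parallel [(set CCREG (compare:CC SRC (const_int 0)))
                           (set DST SRC)])

   i.e. with dot == 1 only the CR0 result of the "<insn>." form is used,
   while with dot == 2 the GPR result is used as well.  */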
18704 /* A validation routine: say whether CODE, a condition code, and MODE
18705 match. The other alternatives either don't make sense or should
18706 never be generated. */
18708 void
18709 validate_condition_mode (enum rtx_code code, machine_mode mode)
18711 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18712 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18713 && GET_MODE_CLASS (mode) == MODE_CC);
18715 /* These don't make sense. */
18716 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18717 || mode != CCUNSmode);
18719 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18720 || mode == CCUNSmode);
18722 gcc_assert (mode == CCFPmode
18723 || (code != ORDERED && code != UNORDERED
18724 && code != UNEQ && code != LTGT
18725 && code != UNGT && code != UNLT
18726 && code != UNGE && code != UNLE));
18728 /* These should never be generated except for
18729 flag_finite_math_only. */
18730 gcc_assert (mode != CCFPmode
18731 || flag_finite_math_only
18732 || (code != LE && code != GE
18733 && code != UNEQ && code != LTGT
18734 && code != UNGT && code != UNLT));
18736 /* These are invalid; the information is not there. */
18737 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18741 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18742 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18743 non-null, store there the bit offset (counted from the right) where
18744 the single stretch of 1 bits begins; and similarly for B, the bit
18745 offset where it ends. */
18747 bool
18748 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18750 unsigned HOST_WIDE_INT val = INTVAL (mask);
18751 unsigned HOST_WIDE_INT bit;
18752 int nb, ne;
18753 int n = GET_MODE_PRECISION (mode);
18755 if (mode != DImode && mode != SImode)
18756 return false;
18758 if (INTVAL (mask) >= 0)
18760 bit = val & -val;
18761 ne = exact_log2 (bit);
18762 nb = exact_log2 (val + bit);
18764 else if (val + 1 == 0)
18766 nb = n;
18767 ne = 0;
18769 else if (val & 1)
18771 val = ~val;
18772 bit = val & -val;
18773 nb = exact_log2 (bit);
18774 ne = exact_log2 (val + bit);
18776 else
18778 bit = val & -val;
18779 ne = exact_log2 (bit);
18780 if (val + bit == 0)
18781 nb = n;
18782 else
18783 nb = 0;
18786 nb--;
18788 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18789 return false;
18791 if (b)
18792 *b = nb;
18793 if (e)
18794 *e = ne;
18796 return true;
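/* A worked example: for MASK = 0x00ff0000 in SImode the positive branch
   above computes bit = 0x10000, so *E = 16 (the lowest 1 bit) and
   val + bit = 0x01000000, so *B = 23 (the highest 1 bit): one stretch
   of ones, hence a valid mask.  For MASK = 0x00ff00ff, val + bit is
   0x00ff0100, not a power of 2, so exact_log2 fails and the function
   returns false.  */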
18799 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18800 or rldicr instruction, to implement an AND with it in mode MODE. */
18802 bool
18803 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18805 int nb, ne;
18807 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18808 return false;
18810 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18811 does not wrap. */
18812 if (mode == DImode)
18813 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18815 /* For SImode, rlwinm can do everything. */
18816 if (mode == SImode)
18817 return (nb < 32 && ne < 32);
18819 return false;
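/* Examples, in DImode: 0x000000000000ffff (ne == 0, nb == 15) is handled
   by rldicl; 0xffffffff00000000 (ne == 32, nb == 63) by rldicr;
   0x0000000000ff0000 (ne == 16, nb == 23, both below 32) by rlwinm;
   but 0x0000ffffffff0000 (ne == 16, nb == 47) fits none of the three
   and is rejected.  */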
18822 /* Return the instruction template for an AND with mask in mode MODE, with
18823 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18825 const char *
18826 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18828 int nb, ne;
18830 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18831 gcc_unreachable ();
18833 if (mode == DImode && ne == 0)
18835 operands[3] = GEN_INT (63 - nb);
18836 if (dot)
18837 return "rldicl. %0,%1,0,%3";
18838 return "rldicl %0,%1,0,%3";
18841 if (mode == DImode && nb == 63)
18843 operands[3] = GEN_INT (63 - ne);
18844 if (dot)
18845 return "rldicr. %0,%1,0,%3";
18846 return "rldicr %0,%1,0,%3";
18849 if (nb < 32 && ne < 32)
18851 operands[3] = GEN_INT (31 - nb);
18852 operands[4] = GEN_INT (31 - ne);
18853 if (dot)
18854 return "rlwinm. %0,%1,0,%3,%4";
18855 return "rlwinm %0,%1,0,%3,%4";
18858 gcc_unreachable ();
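/* Continuing the examples above: an AND with 0x000000000000ffff in DImode
   produces "rldicl %0,%1,0,48" (63 - nb = 48 bits cleared on the left),
   an AND with 0xffffffff00000000 produces "rldicr %0,%1,0,31", and an
   AND with 0x0000000000ff0000 produces "rlwinm %0,%1,0,8,15".  */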
18861 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18862 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18863 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18865 bool
18866 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18868 int nb, ne;
18870 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18871 return false;
18873 int n = GET_MODE_PRECISION (mode);
18874 int sh = -1;
18876 if (CONST_INT_P (XEXP (shift, 1)))
18878 sh = INTVAL (XEXP (shift, 1));
18879 if (sh < 0 || sh >= n)
18880 return false;
18883 rtx_code code = GET_CODE (shift);
18885 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18886 if (sh == 0)
18887 code = ROTATE;
18889 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18890 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18891 code = ASHIFT;
18892 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18894 code = LSHIFTRT;
18895 sh = n - sh;
18898 /* DImode rotates need rld*. */
18899 if (mode == DImode && code == ROTATE)
18900 return (nb == 63 || ne == 0 || ne == sh);
18902 /* SImode rotates need rlw*. */
18903 if (mode == SImode && code == ROTATE)
18904 return (nb < 32 && ne < 32 && sh < 32);
18906 /* Wrap-around masks are only okay for rotates. */
18907 if (ne > nb)
18908 return false;
18910 /* Variable shifts are only okay for rotates. */
18911 if (sh < 0)
18912 return false;
18914 /* Don't allow ASHIFT if the mask is wrong for that. */
18915 if (code == ASHIFT && ne < sh)
18916 return false;
18918 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18919 if the mask is wrong for that. */
18920 if (nb < 32 && ne < 32 && sh < 32
18921 && !(code == LSHIFTRT && nb >= 32 - sh))
18922 return true;
18924 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18925 if the mask is wrong for that. */
18926 if (code == LSHIFTRT)
18927 sh = 64 - sh;
18928 if (nb == 63 || ne == 0 || ne == sh)
18929 return !(code == LSHIFTRT && nb >= sh);
18931 return false;
18934 /* Return the instruction template for a shift with mask in mode MODE, with
18935 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18937 const char *
18938 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18940 int nb, ne;
18942 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18943 gcc_unreachable ();
18945 if (mode == DImode && ne == 0)
18947 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18948 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18949 operands[3] = GEN_INT (63 - nb);
18950 if (dot)
18951 return "rld%I2cl. %0,%1,%2,%3";
18952 return "rld%I2cl %0,%1,%2,%3";
18955 if (mode == DImode && nb == 63)
18957 operands[3] = GEN_INT (63 - ne);
18958 if (dot)
18959 return "rld%I2cr. %0,%1,%2,%3";
18960 return "rld%I2cr %0,%1,%2,%3";
18963 if (mode == DImode
18964 && GET_CODE (operands[4]) != LSHIFTRT
18965 && CONST_INT_P (operands[2])
18966 && ne == INTVAL (operands[2]))
18968 operands[3] = GEN_INT (63 - nb);
18969 if (dot)
18970 return "rld%I2c. %0,%1,%2,%3";
18971 return "rld%I2c %0,%1,%2,%3";
18974 if (nb < 32 && ne < 32)
18976 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18977 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18978 operands[3] = GEN_INT (31 - nb);
18979 operands[4] = GEN_INT (31 - ne);
18980 /* This insn can also be a 64-bit rotate with mask that really makes
18981 it just a shift right (with mask); the %h in the templates below
18982 adjusts for that situation (the shift count is >= 32 then). */
18983 if (dot)
18984 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18985 return "rlw%I2nm %0,%1,%h2,%3,%4";
18988 gcc_unreachable ();
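/* For example, the DImode expression (x << 8) & 0xffffffffffffff00 has
   ne == 8 and nb == 63, so the rldicr arm above applies and the template
   is "rldicr %0,%1,8,55": rotate left 8 and keep IBM bits 0..55, which
   clears exactly the 8 wrapped-around low bits, as the mask requires.  */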
18991 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18992 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18993 ASHIFT, or LSHIFTRT) in mode MODE. */
18995 bool
18996 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18998 int nb, ne;
19000 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
19001 return false;
19003 int n = GET_MODE_PRECISION (mode);
19005 int sh = INTVAL (XEXP (shift, 1));
19006 if (sh < 0 || sh >= n)
19007 return false;
19009 rtx_code code = GET_CODE (shift);
19011 /* Convert any shift by 0 to a rotate, to simplify the code below. */
19012 if (sh == 0)
19013 code = ROTATE;
19015 /* Convert rotate to simple shift if we can, to make analysis simpler. */
19016 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
19017 code = ASHIFT;
19018 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
19020 code = LSHIFTRT;
19021 sh = n - sh;
19024 /* DImode rotates need rldimi. */
19025 if (mode == DImode && code == ROTATE)
19026 return (ne == sh);
19028 /* SImode rotates need rlwimi. */
19029 if (mode == SImode && code == ROTATE)
19030 return (nb < 32 && ne < 32 && sh < 32);
19032 /* Wrap-around masks are only okay for rotates. */
19033 if (ne > nb)
19034 return false;
19036 /* Don't allow ASHIFT if the mask is wrong for that. */
19037 if (code == ASHIFT && ne < sh)
19038 return false;
19040 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
19041 if the mask is wrong for that. */
19042 if (nb < 32 && ne < 32 && sh < 32
19043 && !(code == LSHIFTRT && nb >= 32 - sh))
19044 return true;
19046 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
19047 if the mask is wrong for that. */
19048 if (code == LSHIFTRT)
19049 sh = 64 - sh;
19050 if (ne == sh)
19051 return !(code == LSHIFTRT && nb >= sh);
19053 return false;
19056 /* Return the instruction template for an insert with mask in mode MODE, with
19057 operands OPERANDS. If DOT is true, make it a record-form instruction. */
19059 const char *
19060 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
19062 int nb, ne;
19064 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
19065 gcc_unreachable ();
19067 /* Prefer rldimi because rlwimi is cracked. */
19068 if (TARGET_POWERPC64
19069 && (!dot || mode == DImode)
19070 && GET_CODE (operands[4]) != LSHIFTRT
19071 && ne == INTVAL (operands[2]))
19073 operands[3] = GEN_INT (63 - nb);
19074 if (dot)
19075 return "rldimi. %0,%1,%2,%3";
19076 return "rldimi %0,%1,%2,%3";
19079 if (nb < 32 && ne < 32)
19081 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
19082 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
19083 operands[3] = GEN_INT (31 - nb);
19084 operands[4] = GEN_INT (31 - ne);
19085 if (dot)
19086 return "rlwimi. %0,%1,%2,%3,%4";
19087 return "rlwimi %0,%1,%2,%3,%4";
19090 gcc_unreachable ();
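/* For example, inserting the low halfword of %1 into %0 uses the mask
   0x0000ffff with a zero shift: nb == 15 and ne == 0, so the rlwimi arm
   above yields "rlwimi %0,%1,0,16,31" (copy into IBM bits 16..31, the
   low 16 bits, leaving the rest of %0 intact).  */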
19093 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
19094 using two machine instructions. */
19096 bool
19097 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
19099 /* There are two kinds of AND we can handle with two insns:
19100 1) those we can do with two rl* insns;
19101 2) ori[s];xori[s].
19103 We do not handle that last case yet. */
19105 /* If there is just one stretch of ones, we can do it. */
19106 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
19107 return true;
19109 /* Otherwise, fill in the lowest "hole"; if we can do the result with
19110 one insn, we can do the whole thing with two. */
19111 unsigned HOST_WIDE_INT val = INTVAL (c);
19112 unsigned HOST_WIDE_INT bit1 = val & -val;
19113 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19114 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19115 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19116 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
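/* A worked example of the hole-filling trick above: for C = 0x00ff00ff,
   bit1 = 0x1 (lowest set bit), bit2 = 0x100 (lowest zero bit above the
   bottom stretch), val1 = 0x00ff0000, bit3 = 0x10000, and
   val + bit3 - bit2 = 0x00ffffff, i.e. the bottom hole filled in.  That
   is a single stretch of ones, so the whole AND can be done with two
   rl* instructions.  */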
19119 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
19120 If EXPAND is true, split rotate-and-mask instructions we generate to
19121 their constituent parts as well (this is used during expand); if DOT
19122 is 1, make the last insn a record-form instruction clobbering the
19123 destination GPR and setting the CC reg (from operands[3]); if 2, set
19124 that GPR as well as the CC reg. */
19126 void
19127 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
19129 gcc_assert (!(expand && dot));
19131 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
19133 /* If it is one stretch of ones, it is DImode; shift left, mask, then
19134 shift right. This generates better code than doing the masks without
19135 shifts, or shifting first right and then left. */
19136 int nb, ne;
19137 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
19139 gcc_assert (mode == DImode);
19141 int shift = 63 - nb;
19142 if (expand)
19144 rtx tmp1 = gen_reg_rtx (DImode);
19145 rtx tmp2 = gen_reg_rtx (DImode);
19146 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
19147 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
19148 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
19150 else
19152 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
19153 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
19154 emit_move_insn (operands[0], tmp);
19155 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
19156 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19158 return;
19161 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
19162 that does the rest. */
19163 unsigned HOST_WIDE_INT bit1 = val & -val;
19164 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19165 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19166 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19168 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
19169 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
19171 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
19173 /* Two "no-rotate"-and-mask instructions, for SImode. */
19174 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
19176 gcc_assert (mode == SImode);
19178 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19179 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
19180 emit_move_insn (reg, tmp);
19181 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19182 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19183 return;
19186 gcc_assert (mode == DImode);
19188 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
19189 insns; we have to do the first in SImode, because it wraps. */
19190 if (mask2 <= 0xffffffff
19191 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
19193 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19194 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
19195 GEN_INT (mask1));
19196 rtx reg_low = gen_lowpart (SImode, reg);
19197 emit_move_insn (reg_low, tmp);
19198 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19199 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19200 return;
19203 /* Two rld* insns: rotate, clear the hole in the middle (which now is
19204 at the top end), rotate back and clear the other hole. */
19205 int right = exact_log2 (bit3);
19206 int left = 64 - right;
19208 /* Rotate the mask too. */
19209 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
19211 if (expand)
19213 rtx tmp1 = gen_reg_rtx (DImode);
19214 rtx tmp2 = gen_reg_rtx (DImode);
19215 rtx tmp3 = gen_reg_rtx (DImode);
19216 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
19217 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
19218 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
19219 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
19221 else
19223 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
19224 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
19225 emit_move_insn (operands[0], tmp);
19226 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
19227 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
19228 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19232 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
19233 for lfq and stfq insns, iff the registers are hard registers. */
19235 int
19236 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
19238 /* We might have been passed a SUBREG. */
19239 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
19240 return 0;
19242 /* We might have been passed non floating point registers. */
19243 if (!FP_REGNO_P (REGNO (reg1))
19244 || !FP_REGNO_P (REGNO (reg2)))
19245 return 0;
19247 return (REGNO (reg1) == REGNO (reg2) - 1);
19250 /* Return 1 if addr1 and addr2 are suitable for an lfq or stfq insn.
19251 addr1 and addr2 must be in consecutive memory locations
19252 (addr2 == addr1 + 8). */
19254 int
19255 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
19257 rtx addr1, addr2;
19258 unsigned int reg1, reg2;
19259 int offset1, offset2;
19261 /* The mems cannot be volatile. */
19262 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
19263 return 0;
19265 addr1 = XEXP (mem1, 0);
19266 addr2 = XEXP (mem2, 0);
19268 /* Extract an offset (if used) from the first addr. */
19269 if (GET_CODE (addr1) == PLUS)
19271 /* If not a REG, return zero. */
19272 if (GET_CODE (XEXP (addr1, 0)) != REG)
19273 return 0;
19274 else
19276 reg1 = REGNO (XEXP (addr1, 0));
19277 /* The offset must be constant! */
19278 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
19279 return 0;
19280 offset1 = INTVAL (XEXP (addr1, 1));
19283 else if (GET_CODE (addr1) != REG)
19284 return 0;
19285 else
19287 reg1 = REGNO (addr1);
19288 /* This was a simple (mem (reg)) expression. Offset is 0. */
19289 offset1 = 0;
19292 /* And now for the second addr. */
19293 if (GET_CODE (addr2) == PLUS)
19295 /* If not a REG, return zero. */
19296 if (GET_CODE (XEXP (addr2, 0)) != REG)
19297 return 0;
19298 else
19300 reg2 = REGNO (XEXP (addr2, 0));
19301 /* The offset must be constant. */
19302 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
19303 return 0;
19304 offset2 = INTVAL (XEXP (addr2, 1));
19307 else if (GET_CODE (addr2) != REG)
19308 return 0;
19309 else
19311 reg2 = REGNO (addr2);
19312 /* This was a simple (mem (reg)) expression. Offset is 0. */
19313 offset2 = 0;
19316 /* Both of these must have the same base register. */
19317 if (reg1 != reg2)
19318 return 0;
19320 /* The offset for the second addr must be 8 more than the first addr. */
19321 if (offset2 != offset1 + 8)
19322 return 0;
19324 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
19325 instructions. */
19326 return 1;
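/* For example, (mem (reg 9)) and (mem (plus (reg 9) (const_int 8)))
   qualify: same base register, offsets 0 and 8.  Combined with a
   register pair accepted by registers_ok_for_quad_peep, the peephole
   can then fuse the two adjacent 8-byte accesses into a single
   lfq/stfq.  */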
19329 /* Implement TARGET_SECONDARY_RELOAD_NEEDED_MODE. For SDmode values we
19330 need to use DDmode, in all other cases we can use the same mode. */
19331 static machine_mode
19332 rs6000_secondary_memory_needed_mode (machine_mode mode)
19334 if (lra_in_progress && mode == SDmode)
19335 return DDmode;
19336 return mode;
19339 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19340 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19341 only work on the traditional altivec registers, note if an altivec register
19342 was chosen. */
19344 static enum rs6000_reg_type
19345 register_to_reg_type (rtx reg, bool *is_altivec)
19347 HOST_WIDE_INT regno;
19348 enum reg_class rclass;
19350 if (GET_CODE (reg) == SUBREG)
19351 reg = SUBREG_REG (reg);
19353 if (!REG_P (reg))
19354 return NO_REG_TYPE;
19356 regno = REGNO (reg);
19357 if (regno >= FIRST_PSEUDO_REGISTER)
19359 if (!lra_in_progress && !reload_completed)
19360 return PSEUDO_REG_TYPE;
19362 regno = true_regnum (reg);
19363 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
19364 return PSEUDO_REG_TYPE;
19367 gcc_assert (regno >= 0);
19369 if (is_altivec && ALTIVEC_REGNO_P (regno))
19370 *is_altivec = true;
19372 rclass = rs6000_regno_regclass[regno];
19373 return reg_class_to_reg_type[(int)rclass];
19376 /* Helper function to return the cost of adding a TOC entry address. */
19378 static inline int
19379 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19381 int ret;
19383 if (TARGET_CMODEL != CMODEL_SMALL)
19384 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19386 else
19387 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19389 return ret;
19392 /* Helper function for rs6000_secondary_reload to determine whether the memory
19393 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19394 needs reloading. Return negative if the memory is not handled by the
19395 memory helper functions (so a different reload method should be tried),
19396 0 if no additional instructions are needed, and positive to give the
19397 extra cost of the memory access. */
19399 static int
19400 rs6000_secondary_reload_memory (rtx addr,
19401 enum reg_class rclass,
19402 machine_mode mode)
19404 int extra_cost = 0;
19405 rtx reg, and_arg, plus_arg0, plus_arg1;
19406 addr_mask_type addr_mask;
19407 const char *type = NULL;
19408 const char *fail_msg = NULL;
19410 if (GPR_REG_CLASS_P (rclass))
19411 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19413 else if (rclass == FLOAT_REGS)
19414 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19416 else if (rclass == ALTIVEC_REGS)
19417 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19419 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19420 else if (rclass == VSX_REGS)
19421 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19422 & ~RELOAD_REG_AND_M16);
19424 /* If the register allocator hasn't made up its mind yet on the register
19425 class to use, settle on reasonable defaults. */
19426 else if (rclass == NO_REGS)
19428 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19429 & ~RELOAD_REG_AND_M16);
19431 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19432 addr_mask &= ~(RELOAD_REG_INDEXED
19433 | RELOAD_REG_PRE_INCDEC
19434 | RELOAD_REG_PRE_MODIFY);
19437 else
19438 addr_mask = 0;
19440 /* If the register isn't valid in this register class, just return now. */
19441 if ((addr_mask & RELOAD_REG_VALID) == 0)
19443 if (TARGET_DEBUG_ADDR)
19445 fprintf (stderr,
19446 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19447 "not valid in class\n",
19448 GET_MODE_NAME (mode), reg_class_names[rclass]);
19449 debug_rtx (addr);
19452 return -1;
19455 switch (GET_CODE (addr))
19457 /* Does the register class support auto-update forms for this mode? We
19458 don't need a scratch register, since PowerPC only supports
19459 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19460 case PRE_INC:
19461 case PRE_DEC:
19462 reg = XEXP (addr, 0);
19463 if (!base_reg_operand (reg, GET_MODE (reg)))
19465 fail_msg = "no base register #1";
19466 extra_cost = -1;
19469 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19471 extra_cost = 1;
19472 type = "update";
19474 break;
19476 case PRE_MODIFY:
19477 reg = XEXP (addr, 0);
19478 plus_arg1 = XEXP (addr, 1);
19479 if (!base_reg_operand (reg, GET_MODE (reg))
19480 || GET_CODE (plus_arg1) != PLUS
19481 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19483 fail_msg = "bad PRE_MODIFY";
19484 extra_cost = -1;
19487 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19489 extra_cost = 1;
19490 type = "update";
19492 break;
19494 /* Do we need to simulate AND -16 to clear the bottom address bits used
19495 in VMX load/stores? Only allow the AND for vector sizes. */
19496 case AND:
19497 and_arg = XEXP (addr, 0);
19498 if (GET_MODE_SIZE (mode) != 16
19499 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19500 || INTVAL (XEXP (addr, 1)) != -16)
19502 fail_msg = "bad Altivec AND #1";
19503 extra_cost = -1;
19506 if (rclass != ALTIVEC_REGS)
19508 if (legitimate_indirect_address_p (and_arg, false))
19509 extra_cost = 1;
19511 else if (legitimate_indexed_address_p (and_arg, false))
19512 extra_cost = 2;
19514 else
19516 fail_msg = "bad Altivec AND #2";
19517 extra_cost = -1;
19520 type = "and";
19522 break;
19524 /* If this is an indirect address, make sure it is a base register. */
19525 case REG:
19526 case SUBREG:
19527 if (!legitimate_indirect_address_p (addr, false))
19529 extra_cost = 1;
19530 type = "move";
19532 break;
19534 /* If this is an indexed address, make sure the register class can handle
19535 indexed addresses for this mode. */
19536 case PLUS:
19537 plus_arg0 = XEXP (addr, 0);
19538 plus_arg1 = XEXP (addr, 1);
19540 /* (plus (plus (reg) (constant)) (constant)) is generated during
19541 push_reload processing, so handle it now. */
19542 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19544 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19546 extra_cost = 1;
19547 type = "offset";
19551 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19552 push_reload processing, so handle it now. */
19553 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19555 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19557 extra_cost = 1;
19558 type = "indexed #2";
19562 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19564 fail_msg = "no base register #2";
19565 extra_cost = -1;
19568 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19570 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19571 || !legitimate_indexed_address_p (addr, false))
19573 extra_cost = 1;
19574 type = "indexed";
19578 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19579 && CONST_INT_P (plus_arg1))
19581 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19583 extra_cost = 1;
19584 type = "vector d-form offset";
19588 /* Make sure the register class can handle offset addresses. */
19589 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19591 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19593 extra_cost = 1;
19594 type = "offset #2";
19598 else
19600 fail_msg = "bad PLUS";
19601 extra_cost = -1;
19604 break;
19606 case LO_SUM:
19607 /* Quad offsets are restricted and can't handle normal addresses. */
19608 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19610 extra_cost = -1;
19611 type = "vector d-form lo_sum";
19614 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19616 fail_msg = "bad LO_SUM";
19617 extra_cost = -1;
19620 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19622 extra_cost = 1;
19623 type = "lo_sum";
19625 break;
19627 /* Static addresses need to create a TOC entry. */
19628 case CONST:
19629 case SYMBOL_REF:
19630 case LABEL_REF:
19631 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19633 extra_cost = -1;
19634 type = "vector d-form lo_sum #2";
19637 else
19639 type = "address";
19640 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19642 break;
19644 /* TOC references look like offsetable memory. */
19645 case UNSPEC:
19646 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19648 fail_msg = "bad UNSPEC";
19649 extra_cost = -1;
19652 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19654 extra_cost = -1;
19655 type = "vector d-form lo_sum #3";
19658 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19660 extra_cost = 1;
19661 type = "toc reference";
19663 break;
19665 default:
19667 fail_msg = "bad address";
19668 extra_cost = -1;
19672 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19674 if (extra_cost < 0)
19675 fprintf (stderr,
19676 "rs6000_secondary_reload_memory error: mode = %s, "
19677 "class = %s, addr_mask = '%s', %s\n",
19678 GET_MODE_NAME (mode),
19679 reg_class_names[rclass],
19680 rs6000_debug_addr_mask (addr_mask, false),
19681 (fail_msg != NULL) ? fail_msg : "<bad address>");
19683 else
19684 fprintf (stderr,
19685 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19686 "addr_mask = '%s', extra cost = %d, %s\n",
19687 GET_MODE_NAME (mode),
19688 reg_class_names[rclass],
19689 rs6000_debug_addr_mask (addr_mask, false),
19690 extra_cost,
19691 (type) ? type : "<none>");
19693 debug_rtx (addr);
19696 return extra_cost;
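/* Usage sketch for the tri-state return value above (mirroring the call
   site in rs6000_secondary_reload further below): a negative value tells
   the caller to fall back to another reload strategy, zero means the
   address needs no help, and a positive value is passed on as the cost
   of the scratch register:

     int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
                                                      mode);
     if (extra_cost >= 0)
       {
         done_p = true;
         ret = NO_REGS;
         if (extra_cost > 0)
           {
             sri->extra_cost = extra_cost;
             sri->icode = icode;
           }
       }
*/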
19699 /* Helper function for rs6000_secondary_reload to return true if a move to a
19700 different register class is really a simple move. */
19702 static bool
19703 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19704 enum rs6000_reg_type from_type,
19705 machine_mode mode)
19707 int size = GET_MODE_SIZE (mode);
19709 /* Add support for various direct moves available. In this function, we only
19710 look at cases where we don't need any extra registers, and one or more
19711 simple move insns are issued. Originally small integers are not allowed
19712 in FPR/VSX registers. Single precision binary floating is not a simple
19713 move because we need to convert to the single precision memory layout.
19714 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19715 need special direct move handling, which we do not support yet. */
19716 if (TARGET_DIRECT_MOVE
19717 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19718 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19720 if (TARGET_POWERPC64)
19722 /* ISA 2.07: MTVSRD or MFVSRD. */
19723 if (size == 8)
19724 return true;
19726 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19727 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19728 return true;
19731 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19732 if (TARGET_P8_VECTOR)
19734 if (mode == SImode)
19735 return true;
19737 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19738 return true;
19741 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19742 if (mode == SDmode)
19743 return true;
19746 /* Power6+: MFTGPR or MFFGPR. */
19747 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19748 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19749 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19750 return true;
19752 /* Move to/from SPR. */
19753 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19754 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19755 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19756 return true;
19758 return false;
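/* Examples of moves classified as "simple" by the function above (an
   illustrative list, assuming the named ISA levels are enabled):

     DImode  GPR <-> VSX   64-bit, ISA 2.07:  one mtvsrd/mfvsrd
     TImode  GPR <-> VSX   64-bit, ISA 3.0:   mtvsrdd, or mfvsrd + mfvsrld
                                              (TDmode excluded)
     SImode  GPR <-> VSX   ISA 2.07:          mtvsrwz/mfvsrwz
     SFmode  GPR <-> VSX                      not simple: the value must be
                                              converted to/from the scalar
                                              memory layout  */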
19761 /* Direct move helper function for rs6000_secondary_reload. Handle all of the
19762 special direct moves that involve allocating an extra register. Return true
19763 if there is a helper function for the move, filling in SRI->icode with its
19764 insn code and SRI->extra_cost with its cost; return false otherwise. */
19766 static bool
19767 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19768 enum rs6000_reg_type from_type,
19769 machine_mode mode,
19770 secondary_reload_info *sri,
19771 bool altivec_p)
19773 bool ret = false;
19774 enum insn_code icode = CODE_FOR_nothing;
19775 int cost = 0;
19776 int size = GET_MODE_SIZE (mode);
19778 if (TARGET_POWERPC64 && size == 16)
19780 /* Handle moving 128-bit values from GPRs to VSX registers on
19781 ISA 2.07 (power8, power9) when running in 64-bit mode using
19782 XXPERMDI to glue the two 64-bit values back together. */
19783 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19785 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19786 icode = reg_addr[mode].reload_vsx_gpr;
19789 /* Handle moving 128-bit values from VSX registers to GPRs on
19790 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19791 bottom 64-bit value. */
19792 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19794 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19795 icode = reg_addr[mode].reload_gpr_vsx;
19799 else if (TARGET_POWERPC64 && mode == SFmode)
19801 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19803 cost = 3; /* xscvdpspn, mfvsrd, and. */
19804 icode = reg_addr[mode].reload_gpr_vsx;
19807 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19809 cost = 2; /* mtvsrwz, xscvspdpn. */
19810 icode = reg_addr[mode].reload_vsx_gpr;
19814 else if (!TARGET_POWERPC64 && size == 8)
19816 /* Handle moving 64-bit values from GPRs to floating point registers on
19817 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19818 32-bit values back together. Altivec register classes must be handled
19819 specially since a different instruction is used, and the secondary
19820 reload support requires a single instruction class in the scratch
19821 register constraint. However, right now TFmode is not allowed in
19822 Altivec registers, so the pattern will never match. */
19823 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19825 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19826 icode = reg_addr[mode].reload_fpr_gpr;
19830 if (icode != CODE_FOR_nothing)
19832 ret = true;
19833 if (sri)
19835 sri->icode = icode;
19836 sri->extra_cost = cost;
19840 return ret;
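/* Illustrative example: a TImode move from a GPR pair to a VSX register
   on a 64-bit ISA 2.07 target reports cost 3 and hands reload the
   reload_vsx_gpr pattern, which expands to two mtvsrd instructions plus
   one xxpermdi (see the comments above; a sketch only, the pattern
   itself lives in the machine description).  */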
19843 /* Return whether a move between two register classes can be done either
19844 directly (simple move) or via a pattern that uses a single extra temporary
19845 (using ISA 2.07's direct move in this case). */
19847 static bool
19848 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19849 enum rs6000_reg_type from_type,
19850 machine_mode mode,
19851 secondary_reload_info *sri,
19852 bool altivec_p)
19854 /* Fall back to load/store reloads if either type is not a register. */
19855 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19856 return false;
19858 /* If we haven't allocated registers yet, assume the move can be done for the
19859 standard register types. */
19860 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19861 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19862 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19863 return true;
19865 /* A move within the same set of registers is a simple move for
19866 non-specialized registers. */
19867 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19868 return true;
19870 /* Check whether a simple move can be done directly. */
19871 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19873 if (sri)
19875 sri->icode = CODE_FOR_nothing;
19876 sri->extra_cost = 0;
19878 return true;
19881 /* Now check if we can do it in a few steps. */
19882 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19883 altivec_p);
19886 /* Inform reload about cases where moving X with a mode MODE to a register in
19887 RCLASS requires an extra scratch or immediate register. Return the class
19888 needed for the immediate register.
19890 For VSX and Altivec, we may need a register to convert sp+offset into
19891 reg+sp.
19893 For misaligned 64-bit gpr loads and stores we need a register to
19894 convert an offset address to indirect. */
19896 static reg_class_t
19897 rs6000_secondary_reload (bool in_p,
19898 rtx x,
19899 reg_class_t rclass_i,
19900 machine_mode mode,
19901 secondary_reload_info *sri)
19903 enum reg_class rclass = (enum reg_class) rclass_i;
19904 reg_class_t ret = ALL_REGS;
19905 enum insn_code icode;
19906 bool default_p = false;
19907 bool done_p = false;
19909 /* Allow subreg of memory before/during reload. */
19910 bool memory_p = (MEM_P (x)
19911 || (!reload_completed && GET_CODE (x) == SUBREG
19912 && MEM_P (SUBREG_REG (x))));
19914 sri->icode = CODE_FOR_nothing;
19915 sri->t_icode = CODE_FOR_nothing;
19916 sri->extra_cost = 0;
19917 icode = ((in_p)
19918 ? reg_addr[mode].reload_load
19919 : reg_addr[mode].reload_store);
19921 if (REG_P (x) || register_operand (x, mode))
19923 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19924 bool altivec_p = (rclass == ALTIVEC_REGS);
19925 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19927 if (!in_p)
19928 std::swap (to_type, from_type);
19930 /* Can we do a direct move of some sort? */
19931 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19932 altivec_p))
19934 icode = (enum insn_code)sri->icode;
19935 default_p = false;
19936 done_p = true;
19937 ret = NO_REGS;
19941 /* Make sure 0.0 is not reloaded or forced into memory. */
19942 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19944 ret = NO_REGS;
19945 default_p = false;
19946 done_p = true;
19949 /* If this is a scalar floating point value and we want to load it into the
19950 traditional Altivec registers, route it through a traditional floating
19951 point register, unless we have D-form addressing. Also make sure that
19952 non-zero constants use an FPR. */
19953 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19954 && !mode_supports_vmx_dform (mode)
19955 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19956 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19958 ret = FLOAT_REGS;
19959 default_p = false;
19960 done_p = true;
19963 /* Handle reload of load/stores if we have reload helper functions. */
19964 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19966 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19967 mode);
19969 if (extra_cost >= 0)
19971 done_p = true;
19972 ret = NO_REGS;
19973 if (extra_cost > 0)
19975 sri->extra_cost = extra_cost;
19976 sri->icode = icode;
19981 /* Handle unaligned loads and stores of integer registers. */
19982 if (!done_p && TARGET_POWERPC64
19983 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19984 && memory_p
19985 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19987 rtx addr = XEXP (x, 0);
19988 rtx off = address_offset (addr);
19990 if (off != NULL_RTX)
19992 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19993 unsigned HOST_WIDE_INT offset = INTVAL (off);
19995 /* We need a secondary reload when our legitimate_address_p
19996 says the address is good (as otherwise the entire address
19997 will be reloaded), and the offset is not a multiple of
19998 four or we have an address wrap. Address wrap will only
19999 occur for LO_SUMs since legitimate_offset_address_p
20000 rejects addresses for 16-byte mems that will wrap. */
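/* Worked example (illustrative): a DImode access (extra = 0) at offset
   32765 = 0x7ffd satisfies 0x7ffd + 0x8000 = 0xfffd < 0x10000, so the
   address is otherwise legitimate, but 0x7ffd & 3 != 0 means the DS-form
   ld/std instructions cannot encode it, so a secondary reload converts
   the address to indirect form. Note OFFSET is unsigned, so the
   "+ 0x8000 < 0x10000" test accepts the signed range [-0x8000, 0x7fff]
   via wraparound.  */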
20001 if (GET_CODE (addr) == LO_SUM
20002 ? (1 /* legitimate_address_p allows any offset for lo_sum */
20003 && ((offset & 3) != 0
20004 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
20005 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
20006 && (offset & 3) != 0))
20008 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
20009 if (in_p)
20010 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
20011 : CODE_FOR_reload_di_load);
20012 else
20013 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
20014 : CODE_FOR_reload_di_store);
20015 sri->extra_cost = 2;
20016 ret = NO_REGS;
20017 done_p = true;
20019 else
20020 default_p = true;
20022 else
20023 default_p = true;
20026 if (!done_p && !TARGET_POWERPC64
20027 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
20028 && memory_p
20029 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
20031 rtx addr = XEXP (x, 0);
20032 rtx off = address_offset (addr);
20034 if (off != NULL_RTX)
20036 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
20037 unsigned HOST_WIDE_INT offset = INTVAL (off);
20039 /* We need a secondary reload when our legitimate_address_p
20040 says the address is good (as otherwise the entire address
20041 will be reloaded), and we have a wrap.
20043 legitimate_lo_sum_address_p allows LO_SUM addresses to
20044 have any offset so test for wrap in the low 16 bits.
20046 legitimate_offset_address_p checks for the range
20047 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
20048 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
20049 [0x7ff4,0x7fff] respectively, so test for the
20050 intersection of these ranges, [0x7ffc,0x7fff] and
20051 [0x7ff4,0x7ff7] respectively.
20053 Note that the address we see here may have been
20054 manipulated by legitimize_reload_address. */
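/* Worked example (illustrative): for a DImode access without
   TARGET_POWERPC64, UNITS_PER_WORD is 4, so extra = 4 and the non-LO_SUM
   test below reduces to offset in [0x7ffc, 0x7fff]. At offset 0x7ffc the
   first word is addressable, but the second word would need offset
   0x8000, which is outside the signed 16-bit range, so the address is
   reloaded to indirect form via CODE_FOR_reload_si_load or
   CODE_FOR_reload_si_store.  */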
20055 if (GET_CODE (addr) == LO_SUM
20056 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
20057 : offset - (0x8000 - extra) < UNITS_PER_WORD)
20059 if (in_p)
20060 sri->icode = CODE_FOR_reload_si_load;
20061 else
20062 sri->icode = CODE_FOR_reload_si_store;
20063 sri->extra_cost = 2;
20064 ret = NO_REGS;
20065 done_p = true;
20067 else
20068 default_p = true;
20070 else
20071 default_p = true;
20074 if (!done_p)
20075 default_p = true;
20077 if (default_p)
20078 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
20080 gcc_assert (ret != ALL_REGS);
20082 if (TARGET_DEBUG_ADDR)
20084 fprintf (stderr,
20085 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
20086 "mode = %s",
20087 reg_class_names[ret],
20088 in_p ? "true" : "false",
20089 reg_class_names[rclass],
20090 GET_MODE_NAME (mode));
20092 if (reload_completed)
20093 fputs (", after reload", stderr);
20095 if (!done_p)
20096 fputs (", done_p not set", stderr);
20098 if (default_p)
20099 fputs (", default secondary reload", stderr);
20101 if (sri->icode != CODE_FOR_nothing)
20102 fprintf (stderr, ", reload func = %s, extra cost = %d",
20103 insn_data[sri->icode].name, sri->extra_cost);
20105 else if (sri->extra_cost > 0)
20106 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
20108 fputs ("\n", stderr);
20109 debug_rtx (x);
20112 return ret;
20115 /* Better tracing for rs6000_secondary_reload_inner. */
20117 static void
20118 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
20119 bool store_p)
20121 rtx set, clobber;
20123 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
20125 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
20126 store_p ? "store" : "load");
20128 if (store_p)
20129 set = gen_rtx_SET (mem, reg);
20130 else
20131 set = gen_rtx_SET (reg, mem);
20133 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
20134 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
20137 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
20138 ATTRIBUTE_NORETURN;
20140 static void
20141 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
20142 bool store_p)
20144 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
20145 gcc_unreachable ();
20148 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
20149 reload helper functions. These were identified in
20150 rs6000_secondary_reload_memory, and if reload decided to use the secondary
20151 reload, it calls the insns:
20152 reload_<RELOAD:mode>_<P:mptrsize>_store
20153 reload_<RELOAD:mode>_<P:mptrsize>_load
20155 which in turn calls this function, to do whatever is necessary to create
20156 valid addresses. */
20158 void
20159 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
20161 int regno = true_regnum (reg);
20162 machine_mode mode = GET_MODE (reg);
20163 addr_mask_type addr_mask;
20164 rtx addr;
20165 rtx new_addr;
20166 rtx op_reg, op0, op1;
20167 rtx and_op;
20168 rtx cc_clobber;
20169 rtvec rv;
20171 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
20172 || !base_reg_operand (scratch, GET_MODE (scratch)))
20173 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20175 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
20176 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
20178 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
20179 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
20181 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
20182 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
20184 else
20185 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20187 /* Make sure the mode is valid in this register class. */
20188 if ((addr_mask & RELOAD_REG_VALID) == 0)
20189 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20191 if (TARGET_DEBUG_ADDR)
20192 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
20194 new_addr = addr = XEXP (mem, 0);
20195 switch (GET_CODE (addr))
20197 /* Does the register class support auto update forms for this mode? If
20198 not, do the update now. We don't need a scratch register, since the
20199 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
20200 case PRE_INC:
20201 case PRE_DEC:
20202 op_reg = XEXP (addr, 0);
20203 if (!base_reg_operand (op_reg, Pmode))
20204 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20206 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
20208 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
20209 new_addr = op_reg;
20211 break;
20213 case PRE_MODIFY:
20214 op0 = XEXP (addr, 0);
20215 op1 = XEXP (addr, 1);
20216 if (!base_reg_operand (op0, Pmode)
20217 || GET_CODE (op1) != PLUS
20218 || !rtx_equal_p (op0, XEXP (op1, 0)))
20219 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20221 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
20223 emit_insn (gen_rtx_SET (op0, op1));
20224 new_addr = reg;
20226 break;
20228 /* Do we need to simulate AND -16 to clear the bottom address bits used
20229 in VMX load/stores? */
20230 case AND:
20231 op0 = XEXP (addr, 0);
20232 op1 = XEXP (addr, 1);
20233 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
20235 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
20236 op_reg = op0;
20238 else if (GET_CODE (op1) == PLUS)
20240 emit_insn (gen_rtx_SET (scratch, op1));
20241 op_reg = scratch;
20244 else
20245 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20247 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
20248 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
20249 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
20250 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
20251 new_addr = scratch;
20253 break;
20255 /* If this is an indirect address, make sure it is a base register. */
20256 case REG:
20257 case SUBREG:
20258 if (!base_reg_operand (addr, GET_MODE (addr)))
20260 emit_insn (gen_rtx_SET (scratch, addr));
20261 new_addr = scratch;
20263 break;
20265 /* If this is an indexed address, make sure the register class can handle
20266 indexed addresses for this mode. */
20267 case PLUS:
20268 op0 = XEXP (addr, 0);
20269 op1 = XEXP (addr, 1);
20270 if (!base_reg_operand (op0, Pmode))
20271 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20273 else if (int_reg_operand (op1, Pmode))
20275 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20277 emit_insn (gen_rtx_SET (scratch, addr));
20278 new_addr = scratch;
20282 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
20284 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
20285 || !quad_address_p (addr, mode, false))
20287 emit_insn (gen_rtx_SET (scratch, addr));
20288 new_addr = scratch;
20292 /* Make sure the register class can handle offset addresses. */
20293 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
20295 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20297 emit_insn (gen_rtx_SET (scratch, addr));
20298 new_addr = scratch;
20302 else
20303 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20305 break;
20307 case LO_SUM:
20308 op0 = XEXP (addr, 0);
20309 op1 = XEXP (addr, 1);
20310 if (!base_reg_operand (op0, Pmode))
20311 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20313 else if (int_reg_operand (op1, Pmode))
20315 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20317 emit_insn (gen_rtx_SET (scratch, addr));
20318 new_addr = scratch;
20322 /* Quad offsets are restricted and can't handle normal addresses. */
20323 else if (mode_supports_vsx_dform_quad (mode))
20325 emit_insn (gen_rtx_SET (scratch, addr));
20326 new_addr = scratch;
20329 /* Make sure the register class can handle offset addresses. */
20330 else if (legitimate_lo_sum_address_p (mode, addr, false))
20332 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20334 emit_insn (gen_rtx_SET (scratch, addr));
20335 new_addr = scratch;
20339 else
20340 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20342 break;
20344 case SYMBOL_REF:
20345 case CONST:
20346 case LABEL_REF:
20347 rs6000_emit_move (scratch, addr, Pmode);
20348 new_addr = scratch;
20349 break;
20351 default:
20352 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20355 /* Adjust the address if it changed. */
20356 if (addr != new_addr)
20358 mem = replace_equiv_address_nv (mem, new_addr);
20359 if (TARGET_DEBUG_ADDR)
20360 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20363 /* Now create the move. */
20364 if (store_p)
20365 emit_insn (gen_rtx_SET (mem, reg));
20366 else
20367 emit_insn (gen_rtx_SET (reg, mem));
20369 return;
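/* Example of the PRE_INC handling above (an illustrative sketch): when a
   register class lacks update forms, a load from

     (mem:DF (pre_inc (reg:DI r9)))

   is rewritten by first emitting the increment via gen_add2_insn
   (r9 = r9 + 8) and then performing the plain load from (mem (reg r9)),
   which leaves the same value in r9 that the pre-increment form would
   have.  */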
20372 /* Convert reloads involving 64-bit gprs and misaligned offset
20373 addressing, or multiple 32-bit gprs and offsets that are too large,
20374 to use indirect addressing. */
20376 void
20377 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20379 int regno = true_regnum (reg);
20380 enum reg_class rclass;
20381 rtx addr;
20382 rtx scratch_or_premodify = scratch;
20384 if (TARGET_DEBUG_ADDR)
20386 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20387 store_p ? "store" : "load");
20388 fprintf (stderr, "reg:\n");
20389 debug_rtx (reg);
20390 fprintf (stderr, "mem:\n");
20391 debug_rtx (mem);
20392 fprintf (stderr, "scratch:\n");
20393 debug_rtx (scratch);
20396 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
20397 gcc_assert (GET_CODE (mem) == MEM);
20398 rclass = REGNO_REG_CLASS (regno);
20399 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20400 addr = XEXP (mem, 0);
20402 if (GET_CODE (addr) == PRE_MODIFY)
20404 gcc_assert (REG_P (XEXP (addr, 0))
20405 && GET_CODE (XEXP (addr, 1)) == PLUS
20406 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20407 scratch_or_premodify = XEXP (addr, 0);
20408 if (!HARD_REGISTER_P (scratch_or_premodify))
20409 /* If we have a pseudo here then reload will have arranged
20410 to have it replaced, but only in the original insn.
20411 Use the replacement here too. */
20412 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
20414 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
20415 expressions from the original insn, without unsharing them.
20416 Any RTL that points into the original insn will of course
20417 have register replacements applied. That is why we don't
20418 need to look for replacements under the PLUS. */
20419 addr = XEXP (addr, 1);
20421 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20423 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20425 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20427 /* Now create the move. */
20428 if (store_p)
20429 emit_insn (gen_rtx_SET (mem, reg));
20430 else
20431 emit_insn (gen_rtx_SET (reg, mem));
20433 return;
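/* Illustrative effect of the function above (a sketch): for a misaligned
   64-bit load such as

     (mem:DI (plus:DI (reg:DI rB) (const_int 32765)))

   the PLUS is copied into the scratch register with rs6000_emit_move and
   the access becomes (mem:DI (reg scratch)), i.e. a plain indirect load
   that no longer needs a DS-form offset.  */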
20436 /* Given an rtx X being reloaded into a reg required to be
20437 in class CLASS, return the class of reg to actually use.
20438 In general this is just CLASS; but on some machines
20439 in some cases it is preferable to use a more restrictive class.
20441 On the RS/6000, we have to return NO_REGS when we want to reload a
20442 floating-point CONST_DOUBLE to force it to be copied to memory.
20444 We also don't want to reload integer values into floating-point
20445 registers if we can at all help it. In fact, this can
20446 cause reload to die, if it tries to generate a reload of CTR
20447 into a FP register and discovers it doesn't have the memory location
20448 required.
20450 ??? Would it be a good idea to have reload do the converse, that is
20451 try to reload floating modes into FP registers if possible?
20454 static enum reg_class
20455 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20457 machine_mode mode = GET_MODE (x);
20458 bool is_constant = CONSTANT_P (x);
20460 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20461 reload class for it. */
20462 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20463 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20464 return NO_REGS;
20466 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20467 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20468 return NO_REGS;
20470 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20471 the reloading of address expressions using PLUS into floating point
20472 registers. */
20473 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20475 if (is_constant)
20477 /* Zero is always allowed in all VSX registers. */
20478 if (x == CONST0_RTX (mode))
20479 return rclass;
20481 /* If this is a vector constant that can be formed with a few Altivec
20482 instructions, we want altivec registers. */
20483 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20484 return ALTIVEC_REGS;
20486 /* If this is an integer constant that can easily be loaded into
20487 vector registers, allow it. */
20488 if (CONST_INT_P (x))
20490 HOST_WIDE_INT value = INTVAL (x);
20492 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20493 2.06 can generate it in the Altivec registers with
20494 VSPLTI<x>. */
20495 if (value == -1)
20497 if (TARGET_P8_VECTOR)
20498 return rclass;
20499 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20500 return ALTIVEC_REGS;
20501 else
20502 return NO_REGS;
20505 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20506 a sign extend in the Altivec registers. */
20507 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20508 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20509 return ALTIVEC_REGS;
20512 /* Force constant to memory. */
20513 return NO_REGS;
20516 /* D-form addressing can easily reload the value. */
20517 if (mode_supports_vmx_dform (mode)
20518 || mode_supports_vsx_dform_quad (mode))
20519 return rclass;
20521 /* If this is a scalar floating point value and we don't have D-form
20522 addressing, prefer the traditional floating point registers so that we
20523 can use D-form (register+offset) addressing. */
20524 if (rclass == VSX_REGS
20525 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20526 return FLOAT_REGS;
20528 /* Prefer the Altivec registers if Altivec is handling the vector
20529 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20530 loads. */
20531 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20532 || mode == V1TImode)
20533 return ALTIVEC_REGS;
20535 return rclass;
20538 if (is_constant || GET_CODE (x) == PLUS)
20540 if (reg_class_subset_p (GENERAL_REGS, rclass))
20541 return GENERAL_REGS;
20542 if (reg_class_subset_p (BASE_REGS, rclass))
20543 return BASE_REGS;
20544 return NO_REGS;
20547 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20548 return GENERAL_REGS;
20550 return rclass;
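/* A few concrete outcomes of the function above (illustrative, using the
   ISA correspondences from the comments: P8 vector for ISA 2.07, P9
   vector for ISA 3.0):

     x == -1, rclass == VSX_REGS, ISA 2.07:   VSX_REGS (xxlorc anywhere)
     x == -1, rclass == VSX_REGS, ISA 2.06:   ALTIVEC_REGS (vspltis<x>)
     x == 100, rclass == VSX_REGS, ISA 3.0:   ALTIVEC_REGS (xxspltib)
     SFmode reg, rclass == VSX_REGS, no d-form: FLOAT_REGS (enables
                                                register+offset loads)  */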
20553 /* Debug version of rs6000_preferred_reload_class. */
20554 static enum reg_class
20555 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20557 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20559 fprintf (stderr,
20560 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20561 "mode = %s, x:\n",
20562 reg_class_names[ret], reg_class_names[rclass],
20563 GET_MODE_NAME (GET_MODE (x)));
20564 debug_rtx (x);
20566 return ret;
20569 /* If we are copying between FP or AltiVec registers and anything else, we need
20570 a memory location. The exception is when we are targeting ppc64 and the
20571 moves between fpr and gpr registers are available. Also, under VSX, you
20572 can copy vector registers from the FP register set to the Altivec register
20573 set and vice versa. */
20575 static bool
20576 rs6000_secondary_memory_needed (machine_mode mode,
20577 reg_class_t from_class,
20578 reg_class_t to_class)
20580 enum rs6000_reg_type from_type, to_type;
20581 bool altivec_p = ((from_class == ALTIVEC_REGS)
20582 || (to_class == ALTIVEC_REGS));
20584 /* If a simple/direct move is available, we don't need secondary memory. */
20585 from_type = reg_class_to_reg_type[(int)from_class];
20586 to_type = reg_class_to_reg_type[(int)to_class];
20588 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20589 (secondary_reload_info *)0, altivec_p))
20590 return false;
20592 /* If we have a floating point or vector register class, we need to use
20593 memory to transfer the data. */
20594 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20595 return true;
20597 return false;
20600 /* Debug version of rs6000_secondary_memory_needed. */
20601 static bool
20602 rs6000_debug_secondary_memory_needed (machine_mode mode,
20603 reg_class_t from_class,
20604 reg_class_t to_class)
20606 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20608 fprintf (stderr,
20609 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20610 "to_class = %s, mode = %s\n",
20611 ret ? "true" : "false",
20612 reg_class_names[from_class],
20613 reg_class_names[to_class],
20614 GET_MODE_NAME (mode));
20616 return ret;
20619 /* Return the register class of a scratch register needed to copy IN into
20620 or out of a register in RCLASS in MODE. If it can be done directly,
20621 NO_REGS is returned. */
20623 static enum reg_class
20624 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20625 rtx in)
20627 int regno;
20629 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20630 #if TARGET_MACHO
20631 && MACHOPIC_INDIRECT
20632 #endif
20635 /* We cannot copy a symbolic operand directly into anything
20636 other than BASE_REGS for TARGET_ELF. So indicate that a
20637 register from BASE_REGS is needed as an intermediate
20638 register.
20640 On Darwin, pic addresses require a load from memory, which
20641 needs a base register. */
20642 if (rclass != BASE_REGS
20643 && (GET_CODE (in) == SYMBOL_REF
20644 || GET_CODE (in) == HIGH
20645 || GET_CODE (in) == LABEL_REF
20646 || GET_CODE (in) == CONST))
20647 return BASE_REGS;
20650 if (GET_CODE (in) == REG)
20652 regno = REGNO (in);
20653 if (regno >= FIRST_PSEUDO_REGISTER)
20655 regno = true_regnum (in);
20656 if (regno >= FIRST_PSEUDO_REGISTER)
20657 regno = -1;
20660 else if (GET_CODE (in) == SUBREG)
20662 regno = true_regnum (in);
20663 if (regno >= FIRST_PSEUDO_REGISTER)
20664 regno = -1;
20666 else
20667 regno = -1;
20669 /* If we have VSX register moves, prefer moving scalar values between
20670 Altivec registers and GPR by going via an FPR (and then via memory)
20671 instead of reloading the secondary memory address for Altivec moves. */
20672 if (TARGET_VSX
20673 && GET_MODE_SIZE (mode) < 16
20674 && !mode_supports_vmx_dform (mode)
20675 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20676 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20677 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20678 && (regno >= 0 && INT_REGNO_P (regno)))))
20679 return FLOAT_REGS;
20681 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20682 into anything. */
20683 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20684 || (regno >= 0 && INT_REGNO_P (regno)))
20685 return NO_REGS;
20687 /* Constants, memory, and VSX registers can go into VSX registers (both the
20688 traditional floating point and the altivec registers). */
20689 if (rclass == VSX_REGS
20690 && (regno == -1 || VSX_REGNO_P (regno)))
20691 return NO_REGS;
20693 /* Constants, memory, and FP registers can go into FP registers. */
20694 if ((regno == -1 || FP_REGNO_P (regno))
20695 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20696 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20698 /* Memory, and AltiVec registers can go into AltiVec registers. */
20699 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20700 && rclass == ALTIVEC_REGS)
20701 return NO_REGS;
20703 /* We can copy among the CR registers. */
20704 if ((rclass == CR_REGS || rclass == CR0_REGS)
20705 && regno >= 0 && CR_REGNO_P (regno))
20706 return NO_REGS;
20708 /* Otherwise, we need GENERAL_REGS. */
20709 return GENERAL_REGS;
20712 /* Debug version of rs6000_secondary_reload_class. */
20713 static enum reg_class
20714 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20715 machine_mode mode, rtx in)
20717 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20718 fprintf (stderr,
20719 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20720 "mode = %s, input rtx:\n",
20721 reg_class_names[ret], reg_class_names[rclass],
20722 GET_MODE_NAME (mode));
20723 debug_rtx (in);
20725 return ret;
20728 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20730 static bool
20731 rs6000_can_change_mode_class (machine_mode from,
20732 machine_mode to,
20733 reg_class_t rclass)
20735 unsigned from_size = GET_MODE_SIZE (from);
20736 unsigned to_size = GET_MODE_SIZE (to);
20738 if (from_size != to_size)
20740 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20742 if (reg_classes_intersect_p (xclass, rclass))
20744 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20745 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20746 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20747 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20749 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20750 single register under VSX because the scalar part of the register
20751 is in the upper 64-bits, and not the lower 64-bits. Types like
20752 TFmode/TDmode that take 2 scalar register can overlap. 128-bit
20753 IEEE floating point can't overlap, and neither can small
20754 values. */
20756 if (to_float128_vector_p && from_float128_vector_p)
20757 return true;
20759 else if (to_float128_vector_p || from_float128_vector_p)
20760 return false;
20762 /* TDmode in floating-mode registers must always go into a register
20763 pair with the most significant word in the even-numbered register
20764 to match ISA requirements. In little-endian mode, this does not
20765 match subreg numbering, so we cannot allow subregs. */
20766 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20767 return false;
20769 if (from_size < 8 || to_size < 8)
20770 return false;
20772 if (from_size == 8 && (8 * to_nregs) != to_size)
20773 return false;
20775 if (to_size == 8 && (8 * from_nregs) != from_size)
20776 return false;
20778 return true;
20780 else
20781 return true;
20784 /* Since the VSX register set includes traditional floating point registers
20785 and altivec registers, just check for the size being different instead of
20786 trying to check whether the modes are vector modes. Otherwise it won't
20787 allow say DF and DI to change classes. For types like TFmode and TDmode
20788 that take 2 64-bit registers, rather than a single 128-bit register, don't
20789 allow subregs of those types to other 128 bit types. */
20790 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20792 unsigned num_regs = (from_size + 15) / 16;
20793 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20794 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20795 return false;
20797 return (from_size == 8 || from_size == 16);
20800 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20801 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20802 return false;
20804 return true;
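/* Two illustrative cases for the size-changing rules above: a subreg
   between DFmode and KFmode (8 vs. 16 bytes, KFmode being one of the
   IEEE 128-bit "float128 vector" modes) is rejected, because the 64-bit
   scalar lives in the upper half of the VSX register; a subreg between
   DImode and DFmode (both 8 bytes in a single FPR) is allowed.  */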
20807 /* Debug version of rs6000_can_change_mode_class. */
20808 static bool
20809 rs6000_debug_can_change_mode_class (machine_mode from,
20810 machine_mode to,
20811 reg_class_t rclass)
20813 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20815 fprintf (stderr,
20816 "rs6000_can_change_mode_class, return %s, from = %s, "
20817 "to = %s, rclass = %s\n",
20818 ret ? "true" : "false",
20819 GET_MODE_NAME (from), GET_MODE_NAME (to),
20820 reg_class_names[rclass]);
20822 return ret;
20825 /* Return a string to do a move operation of 128 bits of data. */
20827 const char *
20828 rs6000_output_move_128bit (rtx operands[])
20830 rtx dest = operands[0];
20831 rtx src = operands[1];
20832 machine_mode mode = GET_MODE (dest);
20833 int dest_regno;
20834 int src_regno;
20835 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20836 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20838 if (REG_P (dest))
20840 dest_regno = REGNO (dest);
20841 dest_gpr_p = INT_REGNO_P (dest_regno);
20842 dest_fp_p = FP_REGNO_P (dest_regno);
20843 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20844 dest_vsx_p = dest_fp_p | dest_vmx_p;
20846 else
20848 dest_regno = -1;
20849 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20852 if (REG_P (src))
20854 src_regno = REGNO (src);
20855 src_gpr_p = INT_REGNO_P (src_regno);
20856 src_fp_p = FP_REGNO_P (src_regno);
20857 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20858 src_vsx_p = src_fp_p | src_vmx_p;
20860 else
20862 src_regno = -1;
20863 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20866 /* Register moves. */
20867 if (dest_regno >= 0 && src_regno >= 0)
20869 if (dest_gpr_p)
20871 if (src_gpr_p)
20872 return "#";
20874 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20875 return (WORDS_BIG_ENDIAN
20876 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20877 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20879 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20880 return "#";
20883 else if (TARGET_VSX && dest_vsx_p)
20885 if (src_vsx_p)
20886 return "xxlor %x0,%x1,%x1";
20888 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20889 return (WORDS_BIG_ENDIAN
20890 ? "mtvsrdd %x0,%1,%L1"
20891 : "mtvsrdd %x0,%L1,%1");
20893 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20894 return "#";
20897 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20898 return "vor %0,%1,%1";
20900 else if (dest_fp_p && src_fp_p)
20901 return "#";
20904 /* Loads. */
20905 else if (dest_regno >= 0 && MEM_P (src))
20907 if (dest_gpr_p)
20909 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20910 return "lq %0,%1";
20911 else
20912 return "#";
20915 else if (TARGET_ALTIVEC && dest_vmx_p
20916 && altivec_indexed_or_indirect_operand (src, mode))
20917 return "lvx %0,%y1";
20919 else if (TARGET_VSX && dest_vsx_p)
20921 if (mode_supports_vsx_dform_quad (mode)
20922 && quad_address_p (XEXP (src, 0), mode, true))
20923 return "lxv %x0,%1";
20925 else if (TARGET_P9_VECTOR)
20926 return "lxvx %x0,%y1";
20928 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20929 return "lxvw4x %x0,%y1";
20931 else
20932 return "lxvd2x %x0,%y1";
20935 else if (TARGET_ALTIVEC && dest_vmx_p)
20936 return "lvx %0,%y1";
20938 else if (dest_fp_p)
20939 return "#";
20942 /* Stores. */
20943 else if (src_regno >= 0 && MEM_P (dest))
20945 if (src_gpr_p)
20947 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20948 return "stq %1,%0";
20949 else
20950 return "#";
20953 else if (TARGET_ALTIVEC && src_vmx_p
20954 && altivec_indexed_or_indirect_operand (src, mode))
20955 return "stvx %1,%y0";
20957 else if (TARGET_VSX && src_vsx_p)
20959 if (mode_supports_vsx_dform_quad (mode)
20960 && quad_address_p (XEXP (dest, 0), mode, true))
20961 return "stxv %x1,%0";
20963 else if (TARGET_P9_VECTOR)
20964 return "stxvx %x1,%y0";
20966 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20967 return "stxvw4x %x1,%y0";
20969 else
20970 return "stxvd2x %x1,%y0";
20973 else if (TARGET_ALTIVEC && src_vmx_p)
20974 return "stvx %1,%y0";
20976 else if (src_fp_p)
20977 return "#";
20980 /* Constants. */
20981 else if (dest_regno >= 0
20982 && (GET_CODE (src) == CONST_INT
20983 || GET_CODE (src) == CONST_WIDE_INT
20984 || GET_CODE (src) == CONST_DOUBLE
20985 || GET_CODE (src) == CONST_VECTOR))
20987 if (dest_gpr_p)
20988 return "#";
20990 else if ((dest_vmx_p && TARGET_ALTIVEC)
20991 || (dest_vsx_p && TARGET_VSX))
20992 return output_vec_const_move (operands);
20995 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
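/* Illustrative instruction selections from the function above (a sketch
   of the main rows):

     VSX reg  -> VSX reg                     xxlor %x0,%x1,%x1
     GPR pair -> GPR pair                    "#" (split after reload)
     load to VSX, ISA 3.0 d-form address     lxv %x0,%1
     store from Altivec reg (no VSX path)    stvx %1,%y0  */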
20998 /* Validate a 128-bit move. */
20999 bool
21000 rs6000_move_128bit_ok_p (rtx operands[])
21002 machine_mode mode = GET_MODE (operands[0]);
21003 return (gpc_reg_operand (operands[0], mode)
21004 || gpc_reg_operand (operands[1], mode));
21007 /* Return true if a 128-bit move needs to be split. */
21008 bool
21009 rs6000_split_128bit_ok_p (rtx operands[])
21011 if (!reload_completed)
21012 return false;
21014 if (!gpr_or_gpr_p (operands[0], operands[1]))
21015 return false;
21017 if (quad_load_store_p (operands[0], operands[1]))
21018 return false;
21020 return true;
21024 /* Given a comparison operation, return the bit number in CCR to test. We
21025 know this is a valid comparison.
21027 SCC_P is 1 if this is for an scc. That means that %D will have been
21028 used instead of %C, so the bits will be in different places.
21030 Return -1 if OP isn't a valid comparison for some reason. */
21032 int
21033 ccr_bit (rtx op, int scc_p)
21035 enum rtx_code code = GET_CODE (op);
21036 machine_mode cc_mode;
21037 int cc_regnum;
21038 int base_bit;
21039 rtx reg;
21041 if (!COMPARISON_P (op))
21042 return -1;
21044 reg = XEXP (op, 0);
21046 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
21048 cc_mode = GET_MODE (reg);
21049 cc_regnum = REGNO (reg);
21050 base_bit = 4 * (cc_regnum - CR0_REGNO);
21052 validate_condition_mode (code, cc_mode);
21054 /* When generating a sCOND operation, only positive conditions are
21055 allowed. */
21056 gcc_assert (!scc_p
21057 || code == EQ || code == GT || code == LT || code == UNORDERED
21058 || code == GTU || code == LTU);
21060 switch (code)
21062 case NE:
21063 return scc_p ? base_bit + 3 : base_bit + 2;
21064 case EQ:
21065 return base_bit + 2;
21066 case GT: case GTU: case UNLE:
21067 return base_bit + 1;
21068 case LT: case LTU: case UNGE:
21069 return base_bit;
21070 case ORDERED: case UNORDERED:
21071 return base_bit + 3;
21073 case GE: case GEU:
21074 /* If scc, we will have done a cror to put the bit in the
21075 unordered position. So test that bit. For integer, this is ! LT
21076 unless this is an scc insn. */
21077 return scc_p ? base_bit + 3 : base_bit;
21079 case LE: case LEU:
21080 return scc_p ? base_bit + 3 : base_bit + 1;
21082 default:
21083 gcc_unreachable ();
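/* Worked example (illustrative): for (gt (reg cr2) (const_int 0)) with
   SCC_P == 0, base_bit = 4 * 2 = 8 and GT selects base_bit + 1 = 9; that
   is, the LT/GT/EQ/SO group of CR field 2 starts at bit 8 of the 32-bit
   CR and its GT bit is bit 9.  */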
21087 /* Return the GOT register. */
21089 rtx
21090 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
21092 /* The second flow pass currently (June 1999) can't update
21093 regs_ever_live without disturbing other parts of the compiler, so
21094 update it here to make the prolog/epilogue code happy. */
21095 if (!can_create_pseudo_p ()
21096 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
21097 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
21099 crtl->uses_pic_offset_table = 1;
21101 return pic_offset_table_rtx;
21104 static rs6000_stack_t stack_info;
21106 /* Function to init struct machine_function.
21107 This will be called, via a pointer variable,
21108 from push_function_context. */
21110 static struct machine_function *
21111 rs6000_init_machine_status (void)
21113 stack_info.reload_completed = 0;
21114 return ggc_cleared_alloc<machine_function> ();
21117 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
21119 /* Write out a function code label. */
21121 void
21122 rs6000_output_function_entry (FILE *file, const char *fname)
21124 if (fname[0] != '.')
21126 switch (DEFAULT_ABI)
21128 default:
21129 gcc_unreachable ();
21131 case ABI_AIX:
21132 if (DOT_SYMBOLS)
21133 putc ('.', file);
21134 else
21135 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
21136 break;
21138 case ABI_ELFv2:
21139 case ABI_V4:
21140 case ABI_DARWIN:
21141 break;
21145 RS6000_OUTPUT_BASENAME (file, fname);
21148 /* Print an operand. Recognize special options, documented below. */
21150 #if TARGET_ELF
21151 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21152 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21153 #else
21154 #define SMALL_DATA_RELOC "sda21"
21155 #define SMALL_DATA_REG 0
21156 #endif
21158 void
21159 print_operand (FILE *file, rtx x, int code)
21161 int i;
21162 unsigned HOST_WIDE_INT uval;
21164 switch (code)
21166 /* %a is output_address. */
21168 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21169 output_operand. */
21171 case 'D':
21172 /* Like 'J' but get to the GT bit only. */
21173 gcc_assert (REG_P (x));
21175 /* Bit 1 is GT bit. */
21176 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
21178 /* Add one for shift count in rlinm for scc. */
21179 fprintf (file, "%d", i + 1);
21180 return;
21182 case 'e':
21183 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21184 if (! INT_P (x))
21186 output_operand_lossage ("invalid %%e value");
21187 return;
21190 uval = INTVAL (x);
21191 if ((uval & 0xffff) == 0 && uval != 0)
21192 putc ('s', file);
21193 return;
21195 case 'E':
21196 /* X is a CR register. Print the number of the EQ bit of the CR. */
21197 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21198 output_operand_lossage ("invalid %%E value");
21199 else
21200 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
21201 return;
21203 case 'f':
21204 /* X is a CR register. Print the shift count needed to move it
21205 to the high-order four bits. */
21206 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21207 output_operand_lossage ("invalid %%f value");
21208 else
21209 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
21210 return;
21212 case 'F':
21213 /* Similar, but print the count for the rotate in the opposite
21214 direction. */
21215 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21216 output_operand_lossage ("invalid %%F value");
21217 else
21218 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
21219 return;
21221 case 'G':
21222 /* X is a constant integer. If it is negative, print "m",
21223 otherwise print "z". This is to make an aze or ame insn. */
21224 if (GET_CODE (x) != CONST_INT)
21225 output_operand_lossage ("invalid %%G value");
21226 else if (INTVAL (x) >= 0)
21227 putc ('z', file);
21228 else
21229 putc ('m', file);
21230 return;
21232 case 'h':
21233 /* If constant, output low-order five bits. Otherwise, write
21234 normally. */
21235 if (INT_P (x))
21236 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
21237 else
21238 print_operand (file, x, 0);
21239 return;
21241 case 'H':
21242 /* If constant, output low-order six bits. Otherwise, write
21243 normally. */
21244 if (INT_P (x))
21245 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
21246 else
21247 print_operand (file, x, 0);
21248 return;
21250 case 'I':
21251 /* Print `i' if this is a constant, else nothing. */
21252 if (INT_P (x))
21253 putc ('i', file);
21254 return;
21256 case 'j':
21257 /* Write the bit number in CCR for jump. */
21258 i = ccr_bit (x, 0);
21259 if (i == -1)
21260 output_operand_lossage ("invalid %%j code");
21261 else
21262 fprintf (file, "%d", i);
21263 return;
21265 case 'J':
21266 /* Similar, but add one for shift count in rlinm for scc and pass
21267 scc flag to `ccr_bit'. */
21268 i = ccr_bit (x, 1);
21269 if (i == -1)
21270 output_operand_lossage ("invalid %%J code");
21271 else
21272 /* If we want bit 31, write a shift count of zero, not 32. */
21273 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21274 return;
21276 case 'k':
21277 /* X must be a constant. Write the 1's complement of the
21278 constant. */
21279 if (! INT_P (x))
21280 output_operand_lossage ("invalid %%k value");
21281 else
21282 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
21283 return;
21285 case 'K':
21286 /* X must be a symbolic constant on ELF. Write an
21287 expression suitable for an 'addi' that adds in the low 16
21288 bits of the MEM. */
21289 if (GET_CODE (x) == CONST)
21291 if (GET_CODE (XEXP (x, 0)) != PLUS
21292 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
21293 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
21294 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
21295 output_operand_lossage ("invalid %%K value");
21297 print_operand_address (file, x);
21298 fputs ("@l", file);
21299 return;
21301 /* %l is output_asm_label. */
21303 case 'L':
21304 /* Write second word of DImode or DFmode reference. Works on register
21305 or non-indexed memory only. */
21306 if (REG_P (x))
21307 fputs (reg_names[REGNO (x) + 1], file);
21308 else if (MEM_P (x))
21310 machine_mode mode = GET_MODE (x);
21311 /* Handle possible auto-increment. Since it is pre-increment and
21312 we have already done it, we can just use an offset of one word. */
21313 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21314 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21315 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21316 UNITS_PER_WORD));
21317 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21318 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21319 UNITS_PER_WORD));
21320 else
21321 output_address (mode, XEXP (adjust_address_nv (x, SImode,
21322 UNITS_PER_WORD),
21323 0));
21325 if (small_data_operand (x, GET_MODE (x)))
21326 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21327 reg_names[SMALL_DATA_REG]);
21329 return;
21331 case 'N':
21332 /* Write the number of elements in the vector times 4. */
21333 if (GET_CODE (x) != PARALLEL)
21334 output_operand_lossage ("invalid %%N value");
21335 else
21336 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21337 return;
21339 case 'O':
21340 /* Similar, but subtract 1 first. */
21341 if (GET_CODE (x) != PARALLEL)
21342 output_operand_lossage ("invalid %%O value");
21343 else
21344 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21345 return;
21347 case 'p':
21348 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21349 if (! INT_P (x)
21350 || INTVAL (x) < 0
21351 || (i = exact_log2 (INTVAL (x))) < 0)
21352 output_operand_lossage ("invalid %%p value");
21353 else
21354 fprintf (file, "%d", i);
21355 return;
21357 case 'P':
21358 /* The operand must be an indirect memory reference. The result
21359 is the register name. */
21360 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
21361 || REGNO (XEXP (x, 0)) >= 32)
21362 output_operand_lossage ("invalid %%P value");
21363 else
21364 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21365 return;
21367 case 'q':
21368 /* This outputs the logical code corresponding to a boolean
21369 expression. The expression may have one or both operands
21370 negated (if one, only the first one). For condition register
21371 logical operations, it will also treat the negated
21372 CR codes as NOTs, but not handle NOTs of them. */
21374 const char *const *t = 0;
21375 const char *s;
21376 enum rtx_code code = GET_CODE (x);
21377 static const char * const tbl[3][3] = {
21378 { "and", "andc", "nor" },
21379 { "or", "orc", "nand" },
21380 { "xor", "eqv", "xor" } };
21382 if (code == AND)
21383 t = tbl[0];
21384 else if (code == IOR)
21385 t = tbl[1];
21386 else if (code == XOR)
21387 t = tbl[2];
21388 else
21389 output_operand_lossage ("invalid %%q value");
21391 if (GET_CODE (XEXP (x, 0)) != NOT)
21392 s = t[0];
21393 else
21395 if (GET_CODE (XEXP (x, 1)) == NOT)
21396 s = t[2];
21397 else
21398 s = t[1];
21401 fputs (s, file);
21403 return;
21405 case 'Q':
21406 if (! TARGET_MFCRF)
21407 return;
21408 fputc (',', file);
21409 /* FALLTHRU */
21411 case 'R':
21412 /* X is a CR register. Print the mask for `mtcrf'. */
21413 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21414 output_operand_lossage ("invalid %%R value");
21415 else
21416 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21417 return;
21419 case 's':
21420 /* Low 5 bits of 32 - value. */
21421 if (! INT_P (x))
21422 output_operand_lossage ("invalid %%s value");
21423 else
21424 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21425 return;
21427 case 't':
21428 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21429 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
21431 /* Bit 3 is OV bit. */
21432 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21434 /* If we want bit 31, write a shift count of zero, not 32. */
21435 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21436 return;
21438 case 'T':
21439 /* Print the symbolic name of a branch target register. */
21440 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21441 && REGNO (x) != CTR_REGNO))
21442 output_operand_lossage ("invalid %%T value");
21443 else if (REGNO (x) == LR_REGNO)
21444 fputs ("lr", file);
21445 else
21446 fputs ("ctr", file);
21447 return;
21449 case 'u':
21450 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21451 for use in unsigned operand. */
21452 if (! INT_P (x))
21454 output_operand_lossage ("invalid %%u value");
21455 return;
21458 uval = INTVAL (x);
21459 if ((uval & 0xffff) == 0)
21460 uval >>= 16;
21462 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21463 return;
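      /* For example, 0x12340000 prints as 0x1234 (the non-zero high half),
	 while 0x5678 prints as 0x5678.  */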
21465 case 'v':
21466 /* High-order 16 bits of constant for use in signed operand. */
21467 if (! INT_P (x))
21468 output_operand_lossage ("invalid %%v value");
21469 else
21470 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21471 (INTVAL (x) >> 16) & 0xffff);
21472 return;
21474 case 'U':
21475 /* Print `u' if this has an auto-increment or auto-decrement. */
21476 if (MEM_P (x)
21477 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21478 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21479 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21480 putc ('u', file);
21481 return;
21483 case 'V':
21484 /* Print the trap code for this operand. */
21485 switch (GET_CODE (x))
21487 case EQ:
21488 fputs ("eq", file); /* 4 */
21489 break;
21490 case NE:
21491 fputs ("ne", file); /* 24 */
21492 break;
21493 case LT:
21494 fputs ("lt", file); /* 16 */
21495 break;
21496 case LE:
21497 fputs ("le", file); /* 20 */
21498 break;
21499 case GT:
21500 fputs ("gt", file); /* 8 */
21501 break;
21502 case GE:
21503 fputs ("ge", file); /* 12 */
21504 break;
21505 case LTU:
21506 fputs ("llt", file); /* 2 */
21507 break;
21508 case LEU:
21509 fputs ("lle", file); /* 6 */
21510 break;
21511 case GTU:
21512 fputs ("lgt", file); /* 1 */
21513 break;
21514 case GEU:
21515 fputs ("lge", file); /* 5 */
21516 break;
21517 default:
21518 gcc_unreachable ();
21520 break;
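      /* The numbers in the comments above are the TO-field encodings of
	 the trap instructions: 16 = less, 8 = greater, 4 = equal,
	 2 = logically less, 1 = logically greater; compound tests OR the
	 bits together (e.g. "ne" is 24 == 16|8, "le" is 20 == 16|4).  */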
21522 case 'w':
21523 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21524 normally. */
21525 if (INT_P (x))
21526 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21527 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21528 else
21529 print_operand (file, x, 0);
21530 return;
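      /* The ((x & 0xffff) ^ 0x8000) - 0x8000 idiom sign-extends the low
	 16 bits: 0x7fff stays 32767, while 0x8000 becomes -32768.  */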
21532 case 'x':
21533 /* X is a FPR or Altivec register used in a VSX context. */
21534 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21535 output_operand_lossage ("invalid %%x value");
21536 else
21538 int reg = REGNO (x);
21539 int vsx_reg = (FP_REGNO_P (reg)
21540 ? reg - 32
21541 : reg - FIRST_ALTIVEC_REGNO + 32);
21543 #ifdef TARGET_REGNAMES
21544 if (TARGET_REGNAMES)
21545 fprintf (file, "%%vs%d", vsx_reg);
21546 else
21547 #endif
21548 fprintf (file, "%d", vsx_reg);
21550 return;
21552 case 'X':
21553 if (MEM_P (x)
21554 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21555 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21556 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21557 putc ('x', file);
21558 return;
21560 case 'Y':
21561 /* Like 'L', for third word of TImode/PTImode */
21562 if (REG_P (x))
21563 fputs (reg_names[REGNO (x) + 2], file);
21564 else if (MEM_P (x))
21566 machine_mode mode = GET_MODE (x);
21567 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21568 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21569 output_address (mode, plus_constant (Pmode,
21570 XEXP (XEXP (x, 0), 0), 8));
21571 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21572 output_address (mode, plus_constant (Pmode,
21573 XEXP (XEXP (x, 0), 0), 8));
21574 else
21575 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21576 if (small_data_operand (x, GET_MODE (x)))
21577 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21578 reg_names[SMALL_DATA_REG]);
21580 return;
21582 case 'z':
21583 /* X is a SYMBOL_REF. Write out the name preceded by a
21584 period and without any trailing data in brackets. Used for function
21585 names. If we are configured for System V (or the embedded ABI) on
21586 the PowerPC, do not emit the period, since those systems do not use
21587 TOCs and the like. */
21588 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21590 /* For macho, check to see if we need a stub. */
21591 if (TARGET_MACHO)
21593 const char *name = XSTR (x, 0);
21594 #if TARGET_MACHO
21595 if (darwin_emit_branch_islands
21596 && MACHOPIC_INDIRECT
21597 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21598 name = machopic_indirection_name (x, /*stub_p=*/true);
21599 #endif
21600 assemble_name (file, name);
21602 else if (!DOT_SYMBOLS)
21603 assemble_name (file, XSTR (x, 0));
21604 else
21605 rs6000_output_function_entry (file, XSTR (x, 0));
21606 return;
21608 case 'Z':
21609 /* Like 'L', for last word of TImode/PTImode. */
21610 if (REG_P (x))
21611 fputs (reg_names[REGNO (x) + 3], file);
21612 else if (MEM_P (x))
21614 machine_mode mode = GET_MODE (x);
21615 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21616 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21617 output_address (mode, plus_constant (Pmode,
21618 XEXP (XEXP (x, 0), 0), 12));
21619 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21620 output_address (mode, plus_constant (Pmode,
21621 XEXP (XEXP (x, 0), 0), 12));
21622 else
21623 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21624 if (small_data_operand (x, GET_MODE (x)))
21625 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21626 reg_names[SMALL_DATA_REG]);
21628 return;
21630 /* Print AltiVec memory operand. */
21631 case 'y':
21633 rtx tmp;
21635 gcc_assert (MEM_P (x));
21637 tmp = XEXP (x, 0);
21639 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
21640 && GET_CODE (tmp) == AND
21641 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21642 && INTVAL (XEXP (tmp, 1)) == -16)
21643 tmp = XEXP (tmp, 0);
21644 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21645 && GET_CODE (tmp) == PRE_MODIFY)
21646 tmp = XEXP (tmp, 1);
21647 if (REG_P (tmp))
21648 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21649 else
21651 if (GET_CODE (tmp) != PLUS
21652 || !REG_P (XEXP (tmp, 0))
21653 || !REG_P (XEXP (tmp, 1)))
21655 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21656 break;
21659 if (REGNO (XEXP (tmp, 0)) == 0)
21660 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21661 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21662 else
21663 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21664 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21666 break;
21669 case 0:
21670 if (REG_P (x))
21671 fprintf (file, "%s", reg_names[REGNO (x)]);
21672 else if (MEM_P (x))
21674 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21675 know the width from the mode. */
21676 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21677 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21678 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21679 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21680 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21681 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21682 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21683 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21684 else
21685 output_address (GET_MODE (x), XEXP (x, 0));
21687 else
21689 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21690 /* This hack along with a corresponding hack in
21691 rs6000_output_addr_const_extra arranges to output addends
21692 where the assembler expects to find them, e.g.
21693 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21694 without this hack would be output as "x@toc+4". We
21695 want "x+4@toc". */
21696 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21697 else
21698 output_addr_const (file, x);
21700 return;
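      /* Illustrative output for the MEM cases above (assuming the default
	 numeric reg_names): (mem:DF (pre_inc (reg 9))) prints "8(9)",
	 since GET_MODE_SIZE (DFmode) == 8, and the corresponding PRE_DEC
	 form prints "-8(9)".  */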
21702 case '&':
21703 if (const char *name = get_some_local_dynamic_name ())
21704 assemble_name (file, name);
21705 else
21706 output_operand_lossage ("'%%&' used without any "
21707 "local dynamic TLS references");
21708 return;
21710 default:
21711 output_operand_lossage ("invalid %%xn code");
21715 /* Print the address of an operand. */
21717 void
21718 print_operand_address (FILE *file, rtx x)
21720 if (REG_P (x))
21721 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21722 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21723 || GET_CODE (x) == LABEL_REF)
21725 output_addr_const (file, x);
21726 if (small_data_operand (x, GET_MODE (x)))
21727 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21728 reg_names[SMALL_DATA_REG]);
21729 else
21730 gcc_assert (!TARGET_TOC);
21732 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21733 && REG_P (XEXP (x, 1)))
21735 if (REGNO (XEXP (x, 0)) == 0)
21736 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21737 reg_names[ REGNO (XEXP (x, 0)) ]);
21738 else
21739 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21740 reg_names[ REGNO (XEXP (x, 1)) ]);
21742 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21743 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21744 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21745 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21746 #if TARGET_MACHO
21747 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21748 && CONSTANT_P (XEXP (x, 1)))
21750 fprintf (file, "lo16(");
21751 output_addr_const (file, XEXP (x, 1));
21752 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21754 #endif
21755 #if TARGET_ELF
21756 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21757 && CONSTANT_P (XEXP (x, 1)))
21759 output_addr_const (file, XEXP (x, 1));
21760 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21762 #endif
21763 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21765 /* This hack along with a corresponding hack in
21766 rs6000_output_addr_const_extra arranges to output addends
21767 where the assembler expects to find them, e.g.
21768 (lo_sum (reg 9)
21769 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21770 without this hack would be output as "x@toc+8@l(9)". We
21771 want "x+8@toc@l(9)". */
21772 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21773 if (GET_CODE (x) == LO_SUM)
21774 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21775 else
21776 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21778 else
21779 gcc_unreachable ();
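/* Illustrative address forms emitted above (with the default numeric
   reg_names): a plain register prints "0(9)"; reg+reg prints "9,10",
   with r0 forced into the second (index) slot because r0 in the base
   slot reads as a literal zero; and reg+constant prints "16(9)".  */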
21782 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21784 static bool
21785 rs6000_output_addr_const_extra (FILE *file, rtx x)
21787 if (GET_CODE (x) == UNSPEC)
21788 switch (XINT (x, 1))
21790 case UNSPEC_TOCREL:
21791 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21792 && REG_P (XVECEXP (x, 0, 1))
21793 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21794 output_addr_const (file, XVECEXP (x, 0, 0));
21795 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21797 if (INTVAL (tocrel_offset_oac) >= 0)
21798 fprintf (file, "+");
21799 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21801 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21803 putc ('-', file);
21804 assemble_name (file, toc_label_name);
21805 need_toc_init = 1;
21807 else if (TARGET_ELF)
21808 fputs ("@toc", file);
21809 return true;
21811 #if TARGET_MACHO
21812 case UNSPEC_MACHOPIC_OFFSET:
21813 output_addr_const (file, XVECEXP (x, 0, 0));
21814 putc ('-', file);
21815 machopic_output_function_base_name (file);
21816 return true;
21817 #endif
21819 return false;
21822 /* Target hook for assembling integer objects. The PowerPC version has
21823 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21824 is defined. It also needs to handle DI-mode objects on 64-bit
21825 targets. */
21827 static bool
21828 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21830 #ifdef RELOCATABLE_NEEDS_FIXUP
21831 /* Special handling for SI values. */
21832 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21834 static int recurse = 0;
21836 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21837 the .fixup section. Since the TOC section is already relocated, we
21838 don't need to mark it here. We used to skip the text section, but it
21839 should never be valid for relocated addresses to be placed in the text
21840 section. */
21841 if (DEFAULT_ABI == ABI_V4
21842 && (TARGET_RELOCATABLE || flag_pic > 1)
21843 && in_section != toc_section
21844 && !recurse
21845 && !CONST_SCALAR_INT_P (x)
21846 && CONSTANT_P (x))
21848 char buf[256];
21850 recurse = 1;
21851 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21852 fixuplabelno++;
21853 ASM_OUTPUT_LABEL (asm_out_file, buf);
21854 fprintf (asm_out_file, "\t.long\t(");
21855 output_addr_const (asm_out_file, x);
21856 fprintf (asm_out_file, ")@fixup\n");
21857 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21858 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21859 fprintf (asm_out_file, "\t.long\t");
21860 assemble_name (asm_out_file, buf);
21861 fprintf (asm_out_file, "\n\t.previous\n");
21862 recurse = 0;
21863 return true;
21865 /* Remove initial .'s to turn a -mcall-aixdesc function
21866 address into the address of the descriptor, not the function
21867 itself. */
21868 else if (GET_CODE (x) == SYMBOL_REF
21869 && XSTR (x, 0)[0] == '.'
21870 && DEFAULT_ABI == ABI_AIX)
21872 const char *name = XSTR (x, 0);
21873 while (*name == '.')
21874 name++;
21876 fprintf (asm_out_file, "\t.long\t%s\n", name);
21877 return true;
21880 #endif /* RELOCATABLE_NEEDS_FIXUP */
21881 return default_assemble_integer (x, size, aligned_p);
21884 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21885 /* Emit an assembler directive to set symbol visibility for DECL to
21886 VISIBILITY_TYPE. */
21888 static void
21889 rs6000_assemble_visibility (tree decl, int vis)
21891 if (TARGET_XCOFF)
21892 return;
21894 /* Functions need to have their entry point symbol visibility set as
21895 well as their descriptor symbol visibility. */
21896 if (DEFAULT_ABI == ABI_AIX
21897 && DOT_SYMBOLS
21898 && TREE_CODE (decl) == FUNCTION_DECL)
21900 static const char * const visibility_types[] = {
21901 NULL, "protected", "hidden", "internal"
21904 const char *name, *type;
21906 name = ((* targetm.strip_name_encoding)
21907 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21908 type = visibility_types[vis];
21910 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21911 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21913 else
21914 default_assemble_visibility (decl, vis);
21916 #endif
21918 enum rtx_code
21919 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21921 /* Reversal of FP compares takes care -- an ordered compare
21922 becomes an unordered compare and vice versa. */
21923 if (mode == CCFPmode
21924 && (!flag_finite_math_only
21925 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21926 || code == UNEQ || code == LTGT))
21927 return reverse_condition_maybe_unordered (code);
21928 else
21929 return reverse_condition (code);
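/* For example, reversing GE in CCFPmode yields UNLT rather than LT, so the
   reversed branch is still taken when an operand is a NaN; under
   -ffinite-math-only the cheaper plain reversal (GE -> LT) is used.  */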
21932 /* Generate a compare for CODE. Return a brand-new rtx that
21933 represents the result of the compare. */
21935 static rtx
21936 rs6000_generate_compare (rtx cmp, machine_mode mode)
21938 machine_mode comp_mode;
21939 rtx compare_result;
21940 enum rtx_code code = GET_CODE (cmp);
21941 rtx op0 = XEXP (cmp, 0);
21942 rtx op1 = XEXP (cmp, 1);
21944 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21945 comp_mode = CCmode;
21946 else if (FLOAT_MODE_P (mode))
21947 comp_mode = CCFPmode;
21948 else if (code == GTU || code == LTU
21949 || code == GEU || code == LEU)
21950 comp_mode = CCUNSmode;
21951 else if ((code == EQ || code == NE)
21952 && unsigned_reg_p (op0)
21953 && (unsigned_reg_p (op1)
21954 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21955 /* These are unsigned values; perhaps there will be a later
21956 ordering compare that can be shared with this one. */
21957 comp_mode = CCUNSmode;
21958 else
21959 comp_mode = CCmode;
21961 /* If we have an unsigned compare, make sure we don't have a signed value as
21962 an immediate. */
21963 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21964 && INTVAL (op1) < 0)
21966 op0 = copy_rtx_if_shared (op0);
21967 op1 = force_reg (GET_MODE (op0), op1);
21968 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21971 /* First, the compare. */
21972 compare_result = gen_reg_rtx (comp_mode);
21974 /* IEEE 128-bit support in VSX registers when we do not have hardware
21975 support. */
21976 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21978 rtx libfunc = NULL_RTX;
21979 bool check_nan = false;
21980 rtx dest;
21982 switch (code)
21984 case EQ:
21985 case NE:
21986 libfunc = optab_libfunc (eq_optab, mode);
21987 break;
21989 case GT:
21990 case GE:
21991 libfunc = optab_libfunc (ge_optab, mode);
21992 break;
21994 case LT:
21995 case LE:
21996 libfunc = optab_libfunc (le_optab, mode);
21997 break;
21999 case UNORDERED:
22000 case ORDERED:
22001 libfunc = optab_libfunc (unord_optab, mode);
22002 code = (code == UNORDERED) ? NE : EQ;
22003 break;
22005 case UNGE:
22006 case UNGT:
22007 check_nan = true;
22008 libfunc = optab_libfunc (ge_optab, mode);
22009 code = (code == UNGE) ? GE : GT;
22010 break;
22012 case UNLE:
22013 case UNLT:
22014 check_nan = true;
22015 libfunc = optab_libfunc (le_optab, mode);
22016 code = (code == UNLE) ? LE : LT;
22017 break;
22019 case UNEQ:
22020 case LTGT:
22021 check_nan = true;
22022 libfunc = optab_libfunc (eq_optab, mode);
22023 code = (code == UNEQ) ? EQ : NE;
22024 break;
22026 default:
22027 gcc_unreachable ();
22030 gcc_assert (libfunc);
22032 if (!check_nan)
22033 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22034 SImode, op0, mode, op1, mode);
22036 /* The library signals an exception for signalling NaNs, so we need to
22037 handle isgreater, etc. by first checking isordered. */
22038 else
22040 rtx ne_rtx, normal_dest, unord_dest;
22041 rtx unord_func = optab_libfunc (unord_optab, mode);
22042 rtx join_label = gen_label_rtx ();
22043 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
22044 rtx unord_cmp = gen_reg_rtx (comp_mode);
22047 /* Test for either value being a NaN. */
22048 gcc_assert (unord_func);
22049 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
22050 SImode, op0, mode, op1, mode);
22052 /* Set the result to 1 if either value is a NaN, and jump to the join
22053 label. */
22054 dest = gen_reg_rtx (SImode);
22055 emit_move_insn (dest, const1_rtx);
22056 emit_insn (gen_rtx_SET (unord_cmp,
22057 gen_rtx_COMPARE (comp_mode, unord_dest,
22058 const0_rtx)));
22060 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
22061 emit_jump_insn (gen_rtx_SET (pc_rtx,
22062 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
22063 join_ref,
22064 pc_rtx)));
22066 /* Do the normal comparison, knowing that the values are not
22067 NaNs. */
22068 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22069 SImode, op0, mode, op1, mode);
22071 emit_insn (gen_cstoresi4 (dest,
22072 gen_rtx_fmt_ee (code, SImode, normal_dest,
22073 const0_rtx),
22074 normal_dest, const0_rtx));
22076 /* Join the NaN and non-NaN paths. Compare dest against 0. */
22077 emit_label (join_label);
22078 code = NE;
22081 emit_insn (gen_rtx_SET (compare_result,
22082 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
22085 else
22087 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
22088 CLOBBERs to match cmptf_internal2 pattern. */
22089 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
22090 && FLOAT128_IBM_P (GET_MODE (op0))
22091 && TARGET_HARD_FLOAT)
22092 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22093 gen_rtvec (10,
22094 gen_rtx_SET (compare_result,
22095 gen_rtx_COMPARE (comp_mode, op0, op1)),
22096 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22097 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22098 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22099 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22100 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22101 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22102 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22103 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22104 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
22105 else if (GET_CODE (op1) == UNSPEC
22106 && XINT (op1, 1) == UNSPEC_SP_TEST)
22108 rtx op1b = XVECEXP (op1, 0, 0);
22109 comp_mode = CCEQmode;
22110 compare_result = gen_reg_rtx (CCEQmode);
22111 if (TARGET_64BIT)
22112 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
22113 else
22114 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
22116 else
22117 emit_insn (gen_rtx_SET (compare_result,
22118 gen_rtx_COMPARE (comp_mode, op0, op1)));
22121 /* Some kinds of FP comparisons need an OR operation;
22122 under flag_finite_math_only we don't bother. */
22123 if (FLOAT_MODE_P (mode)
22124 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22125 && !flag_finite_math_only
22126 && (code == LE || code == GE
22127 || code == UNEQ || code == LTGT
22128 || code == UNGT || code == UNLT))
22130 enum rtx_code or1, or2;
22131 rtx or1_rtx, or2_rtx, compare2_rtx;
22132 rtx or_result = gen_reg_rtx (CCEQmode);
22134 switch (code)
22136 case LE: or1 = LT; or2 = EQ; break;
22137 case GE: or1 = GT; or2 = EQ; break;
22138 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22139 case LTGT: or1 = LT; or2 = GT; break;
22140 case UNGT: or1 = UNORDERED; or2 = GT; break;
22141 case UNLT: or1 = UNORDERED; or2 = LT; break;
22142 default: gcc_unreachable ();
22144 validate_condition_mode (or1, comp_mode);
22145 validate_condition_mode (or2, comp_mode);
22146 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22147 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22148 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22149 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22150 const_true_rtx);
22151 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22153 compare_result = or_result;
22154 code = EQ;
22157 validate_condition_mode (code, GET_MODE (compare_result));
22159 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
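/* As an example of the OR combination above: an FP "a <= b" is evaluated
   as (a < b) || (a == b), emitted as a cror of the LT and EQ bits of the
   compare result, after which the caller tests the CCEQ result for EQ.  */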
22163 /* Return the diagnostic message string if the binary operation OP is
22164 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22166 static const char*
22167 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22168 const_tree type1,
22169 const_tree type2)
22171 machine_mode mode1 = TYPE_MODE (type1);
22172 machine_mode mode2 = TYPE_MODE (type2);
22174 /* For complex modes, use the inner type. */
22175 if (COMPLEX_MODE_P (mode1))
22176 mode1 = GET_MODE_INNER (mode1);
22178 if (COMPLEX_MODE_P (mode2))
22179 mode2 = GET_MODE_INNER (mode2);
22181 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22182 double to intermix unless -mfloat128-convert. */
22183 if (mode1 == mode2)
22184 return NULL;
22186 if (!TARGET_FLOAT128_CVT)
22188 if ((mode1 == KFmode && mode2 == IFmode)
22189 || (mode1 == IFmode && mode2 == KFmode))
22190 return N_("__float128 and __ibm128 cannot be used in the same "
22191 "expression");
22193 if (TARGET_IEEEQUAD
22194 && ((mode1 == IFmode && mode2 == TFmode)
22195 || (mode1 == TFmode && mode2 == IFmode)))
22196 return N_("__ibm128 and long double cannot be used in the same "
22197 "expression");
22199 if (!TARGET_IEEEQUAD
22200 && ((mode1 == KFmode && mode2 == TFmode)
22201 || (mode1 == TFmode && mode2 == KFmode)))
22202 return N_("__float128 and long double cannot be used in the same "
22203 "expression");
22206 return NULL;
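/* For example (a hypothetical translation unit, diagnostic paraphrased):

     __float128 f;
     __ibm128 g;
     ... f + g ...   // error: __float128 and __ibm128 cannot be used
		     // in the same expression

   unless -mfloat128-convert is in effect, in which case the operands are
   converted rather than rejected.  */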
22210 /* Expand floating point conversion to/from __float128 and __ibm128. */
22212 void
22213 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22215 machine_mode dest_mode = GET_MODE (dest);
22216 machine_mode src_mode = GET_MODE (src);
22217 convert_optab cvt = unknown_optab;
22218 bool do_move = false;
22219 rtx libfunc = NULL_RTX;
22220 rtx dest2;
22221 typedef rtx (*rtx_2func_t) (rtx, rtx);
22222 rtx_2func_t hw_convert = (rtx_2func_t)0;
22223 size_t kf_or_tf;
22225 struct hw_conv_t {
22226 rtx_2func_t from_df;
22227 rtx_2func_t from_sf;
22228 rtx_2func_t from_si_sign;
22229 rtx_2func_t from_si_uns;
22230 rtx_2func_t from_di_sign;
22231 rtx_2func_t from_di_uns;
22232 rtx_2func_t to_df;
22233 rtx_2func_t to_sf;
22234 rtx_2func_t to_si_sign;
22235 rtx_2func_t to_si_uns;
22236 rtx_2func_t to_di_sign;
22237 rtx_2func_t to_di_uns;
22238 } hw_conversions[2] = {
22239 /* conversions to/from KFmode */
22241 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22242 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22243 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22244 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22245 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22246 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22247 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22248 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22249 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22250 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22251 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22252 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22255 /* conversions to/from TFmode */
22257 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22258 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22259 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22260 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22261 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22262 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22263 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22264 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22265 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22266 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22267 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22268 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22272 if (dest_mode == src_mode)
22273 gcc_unreachable ();
22275 /* Eliminate memory operations. */
22276 if (MEM_P (src))
22277 src = force_reg (src_mode, src);
22279 if (MEM_P (dest))
22281 rtx tmp = gen_reg_rtx (dest_mode);
22282 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22283 rs6000_emit_move (dest, tmp, dest_mode);
22284 return;
22287 /* Convert to IEEE 128-bit floating point. */
22288 if (FLOAT128_IEEE_P (dest_mode))
22290 if (dest_mode == KFmode)
22291 kf_or_tf = 0;
22292 else if (dest_mode == TFmode)
22293 kf_or_tf = 1;
22294 else
22295 gcc_unreachable ();
22297 switch (src_mode)
22299 case E_DFmode:
22300 cvt = sext_optab;
22301 hw_convert = hw_conversions[kf_or_tf].from_df;
22302 break;
22304 case E_SFmode:
22305 cvt = sext_optab;
22306 hw_convert = hw_conversions[kf_or_tf].from_sf;
22307 break;
22309 case E_KFmode:
22310 case E_IFmode:
22311 case E_TFmode:
22312 if (FLOAT128_IBM_P (src_mode))
22313 cvt = sext_optab;
22314 else
22315 do_move = true;
22316 break;
22318 case E_SImode:
22319 if (unsigned_p)
22321 cvt = ufloat_optab;
22322 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22324 else
22326 cvt = sfloat_optab;
22327 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22329 break;
22331 case E_DImode:
22332 if (unsigned_p)
22334 cvt = ufloat_optab;
22335 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22337 else
22339 cvt = sfloat_optab;
22340 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22342 break;
22344 default:
22345 gcc_unreachable ();
22349 /* Convert from IEEE 128-bit floating point. */
22350 else if (FLOAT128_IEEE_P (src_mode))
22352 if (src_mode == KFmode)
22353 kf_or_tf = 0;
22354 else if (src_mode == TFmode)
22355 kf_or_tf = 1;
22356 else
22357 gcc_unreachable ();
22359 switch (dest_mode)
22361 case E_DFmode:
22362 cvt = trunc_optab;
22363 hw_convert = hw_conversions[kf_or_tf].to_df;
22364 break;
22366 case E_SFmode:
22367 cvt = trunc_optab;
22368 hw_convert = hw_conversions[kf_or_tf].to_sf;
22369 break;
22371 case E_KFmode:
22372 case E_IFmode:
22373 case E_TFmode:
22374 if (FLOAT128_IBM_P (dest_mode))
22375 cvt = trunc_optab;
22376 else
22377 do_move = true;
22378 break;
22380 case E_SImode:
22381 if (unsigned_p)
22383 cvt = ufix_optab;
22384 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22386 else
22388 cvt = sfix_optab;
22389 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22391 break;
22393 case E_DImode:
22394 if (unsigned_p)
22396 cvt = ufix_optab;
22397 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22399 else
22401 cvt = sfix_optab;
22402 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22404 break;
22406 default:
22407 gcc_unreachable ();
22411 /* Both IBM format. */
22412 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22413 do_move = true;
22415 else
22416 gcc_unreachable ();
22418 /* Handle conversion between TFmode/KFmode. */
22419 if (do_move)
22420 emit_move_insn (dest, gen_lowpart (dest_mode, src));
22422 /* Handle conversion if we have hardware support. */
22423 else if (TARGET_FLOAT128_HW && hw_convert)
22424 emit_insn ((hw_convert) (dest, src));
22426 /* Call an external function to do the conversion. */
22427 else if (cvt != unknown_optab)
22429 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22430 gcc_assert (libfunc != NULL_RTX);
22432 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22433 src, src_mode);
22435 gcc_assert (dest2 != NULL_RTX);
22436 if (!rtx_equal_p (dest, dest2))
22437 emit_move_insn (dest, dest2);
22440 else
22441 gcc_unreachable ();
22443 return;
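/* For example, a signed DImode -> KFmode conversion uses
   gen_float_kfdi2_hw (the ISA 3.0 xscvsdqp instruction) when
   TARGET_FLOAT128_HW, and otherwise falls back through sfloat_optab to
   the libgcc conversion routine (presumably __floatdikf).  */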
22447 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22448 can be used as that dest register. Return the dest register. */
22451 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22453 if (op2 == const0_rtx)
22454 return op1;
22456 if (GET_CODE (scratch) == SCRATCH)
22457 scratch = gen_reg_rtx (mode);
22459 if (logical_operand (op2, mode))
22460 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22461 else
22462 emit_insn (gen_rtx_SET (scratch,
22463 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22465 return scratch;
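/* For example, comparing OP1 against 17 (a logical_operand) emits an XOR,
   while comparing against -5 emits OP1 + 5; either way the result register
   is zero exactly when OP1 == OP2.  */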
22468 void
22469 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22471 rtx condition_rtx;
22472 machine_mode op_mode;
22473 enum rtx_code cond_code;
22474 rtx result = operands[0];
22476 condition_rtx = rs6000_generate_compare (operands[1], mode);
22477 cond_code = GET_CODE (condition_rtx);
22479 if (cond_code == NE
22480 || cond_code == GE || cond_code == LE
22481 || cond_code == GEU || cond_code == LEU
22482 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22484 rtx not_result = gen_reg_rtx (CCEQmode);
22485 rtx not_op, rev_cond_rtx;
22486 machine_mode cc_mode;
22488 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22490 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22491 SImode, XEXP (condition_rtx, 0), const0_rtx);
22492 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22493 emit_insn (gen_rtx_SET (not_result, not_op));
22494 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22497 op_mode = GET_MODE (XEXP (operands[1], 0));
22498 if (op_mode == VOIDmode)
22499 op_mode = GET_MODE (XEXP (operands[1], 1));
22501 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22503 PUT_MODE (condition_rtx, DImode);
22504 convert_move (result, condition_rtx, 0);
22506 else
22508 PUT_MODE (condition_rtx, SImode);
22509 emit_insn (gen_rtx_SET (result, condition_rtx));
22513 /* Emit a conditional branch: the comparison is OPERANDS[0], and the
22514 branch target label is OPERANDS[3]. */
22515 void
22516 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22518 rtx condition_rtx, loc_ref;
22520 condition_rtx = rs6000_generate_compare (operands[0], mode);
22521 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22522 emit_jump_insn (gen_rtx_SET (pc_rtx,
22523 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22524 loc_ref, pc_rtx)));
22527 /* Return the string to output a conditional branch to LABEL, which is
22528 the operand template of the label, or NULL if the branch is really a
22529 conditional return.
22531 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22532 condition code register and its mode specifies what kind of
22533 comparison we made.
22535 REVERSED is nonzero if we should reverse the sense of the comparison.
22537 INSN is the insn. */
22539 char *
22540 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22542 static char string[64];
22543 enum rtx_code code = GET_CODE (op);
22544 rtx cc_reg = XEXP (op, 0);
22545 machine_mode mode = GET_MODE (cc_reg);
22546 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22547 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22548 int really_reversed = reversed ^ need_longbranch;
22549 char *s = string;
22550 const char *ccode;
22551 const char *pred;
22552 rtx note;
22554 validate_condition_mode (code, mode);
22556 /* Work out which way this really branches. We could use
22557 reverse_condition_maybe_unordered here always but this
22558 makes the resulting assembler clearer. */
22559 if (really_reversed)
22561 /* Reversal of FP compares takes care -- an ordered compare
22562 becomes an unordered compare and vice versa. */
22563 if (mode == CCFPmode)
22564 code = reverse_condition_maybe_unordered (code);
22565 else
22566 code = reverse_condition (code);
22569 switch (code)
22571 /* Not all of these are actually distinct opcodes, but
22572 we distinguish them for clarity of the resulting assembler. */
22573 case NE: case LTGT:
22574 ccode = "ne"; break;
22575 case EQ: case UNEQ:
22576 ccode = "eq"; break;
22577 case GE: case GEU:
22578 ccode = "ge"; break;
22579 case GT: case GTU: case UNGT:
22580 ccode = "gt"; break;
22581 case LE: case LEU:
22582 ccode = "le"; break;
22583 case LT: case LTU: case UNLT:
22584 ccode = "lt"; break;
22585 case UNORDERED: ccode = "un"; break;
22586 case ORDERED: ccode = "nu"; break;
22587 case UNGE: ccode = "nl"; break;
22588 case UNLE: ccode = "ng"; break;
22589 default:
22590 gcc_unreachable ();
22593 /* Maybe we have a guess as to how likely the branch is. */
22594 pred = "";
22595 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22596 if (note != NULL_RTX)
22598 /* PROB is the difference from 50%. */
22599 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22600 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22602 /* Only hint for highly probable/improbable branches on newer cpus when
22603 we have real profile data, as static prediction overrides processor
22604 dynamic prediction. For older cpus we may as well always hint, but
22605 assume not taken for branches that are very close to 50% as a
22606 mispredicted taken branch is more expensive than a
22607 mispredicted not-taken branch. */
22608 if (rs6000_always_hint
22609 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22610 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22611 && br_prob_note_reliable_p (note)))
22613 if (abs (prob) > REG_BR_PROB_BASE / 20
22614 && ((prob > 0) ^ need_longbranch))
22615 pred = "+";
22616 else
22617 pred = "-";
22621 if (label == NULL)
22622 s += sprintf (s, "b%slr%s ", ccode, pred);
22623 else
22624 s += sprintf (s, "b%s%s ", ccode, pred);
22626 /* We need to escape any '%' characters in the reg_names string.
22627 Assume they'd only be the first character.... */
22628 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22629 *s++ = '%';
22630 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22632 if (label != NULL)
22634 /* If the branch distance was too far, we may have to use an
22635 unconditional branch to go the distance. */
22636 if (need_longbranch)
22637 s += sprintf (s, ",$+8\n\tb %s", label);
22638 else
22639 s += sprintf (s, ",%s", label);
22642 return string;
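/* For example (label name illustrative), an EQ test on cr0 with a
   reachable, likely-taken target produces "beq+ 0,.L25", while a target
   beyond conditional-branch range is emitted reversed as "bne 0,$+8"
   followed by "b .L25".  */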
22645 /* Return insn for VSX or Altivec comparisons. */
22647 static rtx
22648 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22650 rtx mask;
22651 machine_mode mode = GET_MODE (op0);
22653 switch (code)
22655 default:
22656 break;
22658 case GE:
22659 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22660 return NULL_RTX;
22661 /* FALLTHRU */
22663 case EQ:
22664 case GT:
22665 case GTU:
22666 case ORDERED:
22667 case UNORDERED:
22668 case UNEQ:
22669 case LTGT:
22670 mask = gen_reg_rtx (mode);
22671 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22672 return mask;
22675 return NULL_RTX;
22678 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22679 DMODE is expected destination mode. This is a recursive function. */
22681 static rtx
22682 rs6000_emit_vector_compare (enum rtx_code rcode,
22683 rtx op0, rtx op1,
22684 machine_mode dmode)
22686 rtx mask;
22687 bool swap_operands = false;
22688 bool try_again = false;
22690 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22691 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22693 /* See if the comparison works as is. */
22694 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22695 if (mask)
22696 return mask;
22698 switch (rcode)
22700 case LT:
22701 rcode = GT;
22702 swap_operands = true;
22703 try_again = true;
22704 break;
22705 case LTU:
22706 rcode = GTU;
22707 swap_operands = true;
22708 try_again = true;
22709 break;
22710 case NE:
22711 case UNLE:
22712 case UNLT:
22713 case UNGE:
22714 case UNGT:
22715 /* Invert condition and try again.
22716 e.g., A != B becomes ~(A==B). */
22718 enum rtx_code rev_code;
22719 enum insn_code nor_code;
22720 rtx mask2;
22722 rev_code = reverse_condition_maybe_unordered (rcode);
22723 if (rev_code == UNKNOWN)
22724 return NULL_RTX;
22726 nor_code = optab_handler (one_cmpl_optab, dmode);
22727 if (nor_code == CODE_FOR_nothing)
22728 return NULL_RTX;
22730 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22731 if (!mask2)
22732 return NULL_RTX;
22734 mask = gen_reg_rtx (dmode);
22735 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22736 return mask;
22738 break;
22739 case GE:
22740 case GEU:
22741 case LE:
22742 case LEU:
22743 /* Try GT/GTU/LT/LTU OR EQ */
22745 rtx c_rtx, eq_rtx;
22746 enum insn_code ior_code;
22747 enum rtx_code new_code;
22749 switch (rcode)
22751 case GE:
22752 new_code = GT;
22753 break;
22755 case GEU:
22756 new_code = GTU;
22757 break;
22759 case LE:
22760 new_code = LT;
22761 break;
22763 case LEU:
22764 new_code = LTU;
22765 break;
22767 default:
22768 gcc_unreachable ();
22771 ior_code = optab_handler (ior_optab, dmode);
22772 if (ior_code == CODE_FOR_nothing)
22773 return NULL_RTX;
22775 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22776 if (!c_rtx)
22777 return NULL_RTX;
22779 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22780 if (!eq_rtx)
22781 return NULL_RTX;
22783 mask = gen_reg_rtx (dmode);
22784 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22785 return mask;
22787 break;
22788 default:
22789 return NULL_RTX;
22792 if (try_again)
22794 if (swap_operands)
22795 std::swap (op0, op1);
22797 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22798 if (mask)
22799 return mask;
22802 /* You only get two chances. */
22803 return NULL_RTX;
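/* To summarize the rewriting above: LT/LTU swap their operands and retry
   as GT/GTU; NE and the UN* codes compute the reversed compare and invert
   the mask via one_cmpl; and GE/GEU/LE/LEU, when not handled directly,
   are built as (GT OR EQ)-style compounds via ior.  */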
22806 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22807 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22808 operands for the relation operation COND. */
22811 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22812 rtx cond, rtx cc_op0, rtx cc_op1)
22814 machine_mode dest_mode = GET_MODE (dest);
22815 machine_mode mask_mode = GET_MODE (cc_op0);
22816 enum rtx_code rcode = GET_CODE (cond);
22817 machine_mode cc_mode = CCmode;
22818 rtx mask;
22819 rtx cond2;
22820 bool invert_move = false;
22822 if (VECTOR_UNIT_NONE_P (dest_mode))
22823 return 0;
22825 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22826 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22828 switch (rcode)
22830 /* Swap operands if we can, and fall back to doing the operation as
22831 specified, and doing a NOR to invert the test. */
22832 case NE:
22833 case UNLE:
22834 case UNLT:
22835 case UNGE:
22836 case UNGT:
22837 /* Invert condition and try again.
22838 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22839 invert_move = true;
22840 rcode = reverse_condition_maybe_unordered (rcode);
22841 if (rcode == UNKNOWN)
22842 return 0;
22843 break;
22845 case GE:
22846 case LE:
22847 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22849 /* Invert condition to avoid compound test. */
22850 invert_move = true;
22851 rcode = reverse_condition (rcode);
22853 break;
22855 case GTU:
22856 case GEU:
22857 case LTU:
22858 case LEU:
22859 /* Mark unsigned tests with CCUNSmode. */
22860 cc_mode = CCUNSmode;
22862 /* Invert condition to avoid compound test if necessary. */
22863 if (rcode == GEU || rcode == LEU)
22865 invert_move = true;
22866 rcode = reverse_condition (rcode);
22868 break;
22870 default:
22871 break;
22874 /* Get the vector mask for the given relational operations. */
22875 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22877 if (!mask)
22878 return 0;
22880 if (invert_move)
22881 std::swap (op_true, op_false);
22883 /* Optimize vec1 == vec2, to know the mask generates -1/0. */
22884 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22885 && (GET_CODE (op_true) == CONST_VECTOR
22886 || GET_CODE (op_false) == CONST_VECTOR))
22888 rtx constant_0 = CONST0_RTX (dest_mode);
22889 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22891 if (op_true == constant_m1 && op_false == constant_0)
22893 emit_move_insn (dest, mask);
22894 return 1;
22897 else if (op_true == constant_0 && op_false == constant_m1)
22899 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22900 return 1;
22903 /* If we can't use the vector comparison directly, perhaps we can use
22904 the mask for the true or false fields, instead of loading up a
22905 constant. */
22906 if (op_true == constant_m1)
22907 op_true = mask;
22909 if (op_false == constant_0)
22910 op_false = mask;
22913 if (!REG_P (op_true) && !SUBREG_P (op_true))
22914 op_true = force_reg (dest_mode, op_true);
22916 if (!REG_P (op_false) && !SUBREG_P (op_false))
22917 op_false = force_reg (dest_mode, op_false);
22919 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22920 CONST0_RTX (dest_mode));
22921 emit_insn (gen_rtx_SET (dest,
22922 gen_rtx_IF_THEN_ELSE (dest_mode,
22923 cond2,
22924 op_true,
22925 op_false)));
22926 return 1;
22929 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22930 for SF/DF scalars. Move TRUE_COND to DEST if OP of the operands of the last
22931 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22932 hardware has no such operation. */
22934 static int
22935 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22937 enum rtx_code code = GET_CODE (op);
22938 rtx op0 = XEXP (op, 0);
22939 rtx op1 = XEXP (op, 1);
22940 machine_mode compare_mode = GET_MODE (op0);
22941 machine_mode result_mode = GET_MODE (dest);
22942 bool max_p = false;
22944 if (result_mode != compare_mode)
22945 return 0;
22947 if (code == GE || code == GT)
22948 max_p = true;
22949 else if (code == LE || code == LT)
22950 max_p = false;
22951 else
22952 return 0;
22954 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22957 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22958 max_p = !max_p;
22960 else
22961 return 0;
22963 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22964 return 1;
22967 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22968 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP of the
22969 operands of the last comparison is nonzero/true, FALSE_COND if it is
22970 zero/false. Return 0 if the hardware has no such operation. */
22972 static int
22973 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22975 enum rtx_code code = GET_CODE (op);
22976 rtx op0 = XEXP (op, 0);
22977 rtx op1 = XEXP (op, 1);
22978 machine_mode result_mode = GET_MODE (dest);
22979 rtx compare_rtx;
22980 rtx cmove_rtx;
22981 rtx clobber_rtx;
22983 if (!can_create_pseudo_p ())
22984 return 0;
22986 switch (code)
22988 case EQ:
22989 case GE:
22990 case GT:
22991 break;
22993 case NE:
22994 case LT:
22995 case LE:
22996 code = swap_condition (code);
22997 std::swap (op0, op1);
22998 break;
23000 default:
23001 return 0;
23004 /* Generate: [(parallel [(set (dest)
23005 (if_then_else (op (cmp1) (cmp2))
23006 (true)
23007 (false)))
23008 (clobber (scratch))])]. */
23010 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
23011 cmove_rtx = gen_rtx_SET (dest,
23012 gen_rtx_IF_THEN_ELSE (result_mode,
23013 compare_rtx,
23014 true_cond,
23015 false_cond));
23017 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
23018 emit_insn (gen_rtx_PARALLEL (VOIDmode,
23019 gen_rtvec (2, cmove_rtx, clobber_rtx)));
23021 return 1;
23024 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
23025 operands of the last comparison is nonzero/true, FALSE_COND if it
23026 is zero/false. Return 0 if the hardware has no such operation. */
23029 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23031 enum rtx_code code = GET_CODE (op);
23032 rtx op0 = XEXP (op, 0);
23033 rtx op1 = XEXP (op, 1);
23034 machine_mode compare_mode = GET_MODE (op0);
23035 machine_mode result_mode = GET_MODE (dest);
23036 rtx temp;
23037 bool is_against_zero;
23039 /* These modes should always match. */
23040 if (GET_MODE (op1) != compare_mode
23041 /* In the isel case however, we can use a compare immediate, so
23042 op1 may be a small constant. */
23043 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
23044 return 0;
23045 if (GET_MODE (true_cond) != result_mode)
23046 return 0;
23047 if (GET_MODE (false_cond) != result_mode)
23048 return 0;
23050 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
23051 if (TARGET_P9_MINMAX
23052 && (compare_mode == SFmode || compare_mode == DFmode)
23053 && (result_mode == SFmode || result_mode == DFmode))
23055 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
23056 return 1;
23058 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
23059 return 1;
23062 /* Don't allow using floating point comparisons for integer results for
23063 now. */
23064 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
23065 return 0;
23067 /* First, work out if the hardware can do this at all, or
23068 if it's too slow.... */
23069 if (!FLOAT_MODE_P (compare_mode))
23071 if (TARGET_ISEL)
23072 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
23073 return 0;
23076 is_against_zero = op1 == CONST0_RTX (compare_mode);
23078 /* A floating-point subtract might overflow, underflow, or produce
23079 an inexact result, thus changing the floating-point flags, so it
23080 can't be generated if we care about that. It's safe if one side
23081 of the construct is zero, since then no subtract will be
23082 generated. */
23083 if (SCALAR_FLOAT_MODE_P (compare_mode)
23084 && flag_trapping_math && ! is_against_zero)
23085 return 0;
23087 /* Eliminate half of the comparisons by switching operands, this
23088 makes the remaining code simpler. */
23089 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
23090 || code == LTGT || code == LT || code == UNLE)
23092 code = reverse_condition_maybe_unordered (code);
23093 temp = true_cond;
23094 true_cond = false_cond;
23095 false_cond = temp;
23098 /* UNEQ and LTGT take four instructions for a comparison with zero,
23099 it'll probably be faster to use a branch here too. */
23100 if (code == UNEQ && HONOR_NANS (compare_mode))
23101 return 0;
23103 /* We're going to try to implement comparisons by performing
23104 a subtract, then comparing against zero. Unfortunately,
23105 Inf - Inf is NaN which is not zero, and so if we don't
23106 know that the operand is finite and the comparison
23107 would treat EQ different to UNORDERED, we can't do it. */
23108 if (HONOR_INFINITIES (compare_mode)
23109 && code != GT && code != UNGE
23110 && (GET_CODE (op1) != CONST_DOUBLE
23111 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23112 /* Constructs of the form (a OP b ? a : b) are safe. */
23113 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23114 || (! rtx_equal_p (op0, true_cond)
23115 && ! rtx_equal_p (op1, true_cond))))
23116 return 0;
23118 /* At this point we know we can use fsel. */
23120 /* Reduce the comparison to a comparison against zero. */
23121 if (! is_against_zero)
23123 temp = gen_reg_rtx (compare_mode);
23124 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23125 op0 = temp;
23126 op1 = CONST0_RTX (compare_mode);
23129 /* If we don't care about NaNs we can reduce some of the comparisons
23130 down to faster ones. */
23131 if (! HONOR_NANS (compare_mode))
23132 switch (code)
23134 case GT:
23135 code = LE;
23136 temp = true_cond;
23137 true_cond = false_cond;
23138 false_cond = temp;
23139 break;
23140 case UNGE:
23141 code = GE;
23142 break;
23143 case UNEQ:
23144 code = EQ;
23145 break;
23146 default:
23147 break;
23150 /* Now, reduce everything down to a GE. */
23151 switch (code)
23153 case GE:
23154 break;
23156 case LE:
23157 temp = gen_reg_rtx (compare_mode);
23158 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23159 op0 = temp;
23160 break;
23162 case ORDERED:
23163 temp = gen_reg_rtx (compare_mode);
23164 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23165 op0 = temp;
23166 break;
23168 case EQ:
23169 temp = gen_reg_rtx (compare_mode);
23170 emit_insn (gen_rtx_SET (temp,
23171 gen_rtx_NEG (compare_mode,
23172 gen_rtx_ABS (compare_mode, op0))));
23173 op0 = temp;
23174 break;
23176 case UNGE:
23177 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23178 temp = gen_reg_rtx (result_mode);
23179 emit_insn (gen_rtx_SET (temp,
23180 gen_rtx_IF_THEN_ELSE (result_mode,
23181 gen_rtx_GE (VOIDmode,
23182 op0, op1),
23183 true_cond, false_cond)));
23184 false_cond = true_cond;
23185 true_cond = temp;
23187 temp = gen_reg_rtx (compare_mode);
23188 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23189 op0 = temp;
23190 break;
23192 case GT:
23193 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23194 temp = gen_reg_rtx (result_mode);
23195 emit_insn (gen_rtx_SET (temp,
23196 gen_rtx_IF_THEN_ELSE (result_mode,
23197 gen_rtx_GE (VOIDmode,
23198 op0, op1),
23199 true_cond, false_cond)));
23200 true_cond = false_cond;
23201 false_cond = temp;
23203 temp = gen_reg_rtx (compare_mode);
23204 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23205 op0 = temp;
23206 break;
23208 default:
23209 gcc_unreachable ();
23212 emit_insn (gen_rtx_SET (dest,
23213 gen_rtx_IF_THEN_ELSE (result_mode,
23214 gen_rtx_GE (VOIDmode,
23215 op0, op1),
23216 true_cond, false_cond)));
23217 return 1;
23220 /* Same as above, but for ints (isel). */
23223 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23225 rtx condition_rtx, cr;
23226 machine_mode mode = GET_MODE (dest);
23227 enum rtx_code cond_code;
23228 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23229 bool signedp;
23231 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23232 return 0;
23234 /* We still have to do the compare, because isel doesn't do a
23235 compare; it just looks at the CRx bits set by a previous compare
23236 instruction. */
23237 condition_rtx = rs6000_generate_compare (op, mode);
23238 cond_code = GET_CODE (condition_rtx);
23239 cr = XEXP (condition_rtx, 0);
23240 signedp = GET_MODE (cr) == CCmode;
23242 isel_func = (mode == SImode
23243 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23244 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23246 switch (cond_code)
23248 case LT: case GT: case LTU: case GTU: case EQ:
23249 /* isel handles these directly. */
23250 break;
23252 default:
23253 /* We need to swap the sense of the comparison. */
23255 std::swap (false_cond, true_cond);
23256 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23258 break;
23261 false_cond = force_reg (mode, false_cond);
23262 if (true_cond != const0_rtx)
23263 true_cond = force_reg (mode, true_cond);
23265 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23267 return 1;
23270 void
23271 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23273 machine_mode mode = GET_MODE (op0);
23274 enum rtx_code c;
23275 rtx target;
23277 /* VSX/altivec have direct min/max insns. */
23278 if ((code == SMAX || code == SMIN)
23279 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23280 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23282 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23283 return;
23286 if (code == SMAX || code == SMIN)
23287 c = GE;
23288 else
23289 c = GEU;
23291 if (code == SMAX || code == UMAX)
23292 target = emit_conditional_move (dest, c, op0, op1, mode,
23293 op0, op1, mode, 0);
23294 else
23295 target = emit_conditional_move (dest, c, op0, op1, mode,
23296 op1, op0, mode, 0);
23297 gcc_assert (target);
23298 if (target != dest)
23299 emit_move_insn (dest, target);
23302 /* Split a signbit operation on 64-bit machines with direct move. Also allow
23303 for the value to come from memory or if it is already loaded into a GPR. */
23305 void
23306 rs6000_split_signbit (rtx dest, rtx src)
23308 machine_mode d_mode = GET_MODE (dest);
23309 machine_mode s_mode = GET_MODE (src);
23310 rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
23311 rtx shift_reg = dest_di;
23313 gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);
23315 if (MEM_P (src))
23317 rtx mem = (WORDS_BIG_ENDIAN
23318 ? adjust_address (src, DImode, 0)
23319 : adjust_address (src, DImode, 8));
23320 emit_insn (gen_rtx_SET (dest_di, mem));
23323 else
23325 unsigned int r = reg_or_subregno (src);
23327 if (INT_REGNO_P (r))
23328 shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));
23330 else
23332 /* Generate the special mfvsrd instruction to get it in a GPR. */
23333 gcc_assert (VSX_REGNO_P (r));
23334 if (s_mode == KFmode)
23335 emit_insn (gen_signbitkf2_dm2 (dest_di, src));
23336 else
23337 emit_insn (gen_signbittf2_dm2 (dest_di, src));
23341 emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
23342 return;
23345 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23346 COND is true. Mark the jump as unlikely to be taken. */
23348 static void
23349 emit_unlikely_jump (rtx cond, rtx label)
23351 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23352 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23353 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23356 /* A subroutine of the atomic operation splitters. Emit a load-locked
23357 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23358 the zero_extend operation. */
23360 static void
23361 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23363 rtx (*fn) (rtx, rtx) = NULL;
23365 switch (mode)
23367 case E_QImode:
23368 fn = gen_load_lockedqi;
23369 break;
23370 case E_HImode:
23371 fn = gen_load_lockedhi;
23372 break;
23373 case E_SImode:
23374 if (GET_MODE (mem) == QImode)
23375 fn = gen_load_lockedqi_si;
23376 else if (GET_MODE (mem) == HImode)
23377 fn = gen_load_lockedhi_si;
23378 else
23379 fn = gen_load_lockedsi;
23380 break;
23381 case E_DImode:
23382 fn = gen_load_lockeddi;
23383 break;
23384 case E_TImode:
23385 fn = gen_load_lockedti;
23386 break;
23387 default:
23388 gcc_unreachable ();
23390 emit_insn (fn (reg, mem));
23393 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23394 instruction in MODE. */
23396 static void
23397 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23399 rtx (*fn) (rtx, rtx, rtx) = NULL;
23401 switch (mode)
23403 case E_QImode:
23404 fn = gen_store_conditionalqi;
23405 break;
23406 case E_HImode:
23407 fn = gen_store_conditionalhi;
23408 break;
23409 case E_SImode:
23410 fn = gen_store_conditionalsi;
23411 break;
23412 case E_DImode:
23413 fn = gen_store_conditionaldi;
23414 break;
23415 case E_TImode:
23416 fn = gen_store_conditionalti;
23417 break;
23418 default:
23419 gcc_unreachable ();
23422 /* Emit sync before stwcx. to address PPC405 Erratum. */
23423 if (PPC405_ERRATUM77)
23424 emit_insn (gen_hwsync ());
23426 emit_insn (fn (res, mem, val));
23429 /* Expand barriers before and after a load_locked/store_cond sequence. */
23431 static rtx
23432 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23434 rtx addr = XEXP (mem, 0);
23436 if (!legitimate_indirect_address_p (addr, reload_completed)
23437 && !legitimate_indexed_address_p (addr, reload_completed))
23439 addr = force_reg (Pmode, addr);
23440 mem = replace_equiv_address_nv (mem, addr);
23443 switch (model)
23445 case MEMMODEL_RELAXED:
23446 case MEMMODEL_CONSUME:
23447 case MEMMODEL_ACQUIRE:
23448 break;
23449 case MEMMODEL_RELEASE:
23450 case MEMMODEL_ACQ_REL:
23451 emit_insn (gen_lwsync ());
23452 break;
23453 case MEMMODEL_SEQ_CST:
23454 emit_insn (gen_hwsync ());
23455 break;
23456 default:
23457 gcc_unreachable ();
23459 return mem;
23462 static void
23463 rs6000_post_atomic_barrier (enum memmodel model)
23465 switch (model)
23467 case MEMMODEL_RELAXED:
23468 case MEMMODEL_CONSUME:
23469 case MEMMODEL_RELEASE:
23470 break;
23471 case MEMMODEL_ACQUIRE:
23472 case MEMMODEL_ACQ_REL:
23473 case MEMMODEL_SEQ_CST:
23474 emit_insn (gen_isync ());
23475 break;
23476 default:
23477 gcc_unreachable ();
23481 /* A subroutine of the various atomic expanders. For sub-word operations,
23482 we must adjust things to operate on SImode. Given the original MEM,
23483 return a new aligned memory. Also build and return the quantities by
23484 which to shift and mask. */
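/* A worked example, assuming a QImode operand at address 0x1003 in
   little-endian mode: ALIGN is 0x1003 & -4 = 0x1000, SHIFT is
   (0x1003 << 3) & 0x18 = 24, and MASK is 0xFF << 24, so the byte is
   operated on in bits 24..31 of the aligned SImode word.  In big-endian
   mode the XOR below flips SHIFT to 0, since that byte is then the
   least significant byte of the word.  */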
23486 static rtx
23487 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23489 rtx addr, align, shift, mask, mem;
23490 HOST_WIDE_INT shift_mask;
23491 machine_mode mode = GET_MODE (orig_mem);
23493 /* For smaller modes, we have to implement this via SImode. */
23494 shift_mask = (mode == QImode ? 0x18 : 0x10);
23496 addr = XEXP (orig_mem, 0);
23497 addr = force_reg (GET_MODE (addr), addr);
23499 /* Aligned memory containing subword. Generate a new memory. We
23500 do not want any of the existing MEM_ATTR data, as we're now
23501 accessing memory outside the original object. */
23502 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23503 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23504 mem = gen_rtx_MEM (SImode, align);
23505 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23506 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23507 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23509 /* Shift amount for subword relative to aligned word. */
23510 shift = gen_reg_rtx (SImode);
23511 addr = gen_lowpart (SImode, addr);
23512 rtx tmp = gen_reg_rtx (SImode);
23513 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23514 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23515 if (BYTES_BIG_ENDIAN)
23516 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23517 shift, 1, OPTAB_LIB_WIDEN);
23518 *pshift = shift;
23520 /* Mask for insertion. */
23521 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23522 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23523 *pmask = mask;
23525 return mem;
23528 /* A subroutine of the various atomic expanders. For sub-word operands,
23529 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23531 static rtx
23532 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23534 rtx x;
23536 x = gen_reg_rtx (SImode);
23537 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23538 gen_rtx_NOT (SImode, mask),
23539 oldval)));
23541 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23543 return x;
23546 /* A subroutine of the various atomic expanders. For sub-word operands,
23547 extract WIDE to NARROW via SHIFT. */
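/* E.g. with the shift/mask example above, NARROW receives
   (WIDE >> 24) & 0xFF: the shift is explicit, while the masking falls
   out of the move through gen_lowpart.  */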
23549 static void
23550 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23552 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23553 wide, 1, OPTAB_LIB_WIDEN);
23554 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23557 /* Expand an atomic compare and swap operation. */
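/* For a strong SImode __atomic_compare_exchange with seq_cst ordering,
   the emitted sequence is roughly (illustrative only):

	sync
   .L1:	lwarx   ret,0,mem
	cmpw    ret,old
	bne-    .L2
	stwcx.  new,0,mem
	bne-    .L1
   .L2:	isync

   A weak compare-and-swap omits the retry branch back to .L1.  */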
23559 void
23560 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23562 rtx boolval, retval, mem, oldval, newval, cond;
23563 rtx label1, label2, x, mask, shift;
23564 machine_mode mode, orig_mode;
23565 enum memmodel mod_s, mod_f;
23566 bool is_weak;
23568 boolval = operands[0];
23569 retval = operands[1];
23570 mem = operands[2];
23571 oldval = operands[3];
23572 newval = operands[4];
23573 is_weak = (INTVAL (operands[5]) != 0);
23574 mod_s = memmodel_base (INTVAL (operands[6]));
23575 mod_f = memmodel_base (INTVAL (operands[7]));
23576 orig_mode = mode = GET_MODE (mem);
23578 mask = shift = NULL_RTX;
23579 if (mode == QImode || mode == HImode)
23581 /* Before power8, we didn't have access to lbarx/lharx, so we generate a
23582 lwarx and shift/mask operations. With power8, we need to do the
23583 comparison in SImode, but the store is still done in QI/HImode. */
23584 oldval = convert_modes (SImode, mode, oldval, 1);
23586 if (!TARGET_SYNC_HI_QI)
23588 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23590 /* Shift and mask OLDVAL into position within the word. */
23591 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23592 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23594 /* Shift and mask NEWVAL into position within the word. */
23595 newval = convert_modes (SImode, mode, newval, 1);
23596 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23597 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23600 /* Prepare to adjust the return value. */
23601 retval = gen_reg_rtx (SImode);
23602 mode = SImode;
23604 else if (reg_overlap_mentioned_p (retval, oldval))
23605 oldval = copy_to_reg (oldval);
23607 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23608 oldval = copy_to_mode_reg (mode, oldval);
23610 if (reg_overlap_mentioned_p (retval, newval))
23611 newval = copy_to_reg (newval);
23613 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23615 label1 = NULL_RTX;
23616 if (!is_weak)
23618 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23619 emit_label (XEXP (label1, 0));
23621 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23623 emit_load_locked (mode, retval, mem);
23625 x = retval;
23626 if (mask)
23627 x = expand_simple_binop (SImode, AND, retval, mask,
23628 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23630 cond = gen_reg_rtx (CCmode);
23631 /* If we have TImode, synthesize a comparison. */
23632 if (mode != TImode)
23633 x = gen_rtx_COMPARE (CCmode, x, oldval);
23634 else
23636 rtx xor1_result = gen_reg_rtx (DImode);
23637 rtx xor2_result = gen_reg_rtx (DImode);
23638 rtx or_result = gen_reg_rtx (DImode);
23639 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23640 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23641 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23642 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23644 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23645 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23646 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23647 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23650 emit_insn (gen_rtx_SET (cond, x));
23652 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23653 emit_unlikely_jump (x, label2);
23655 x = newval;
23656 if (mask)
23657 x = rs6000_mask_atomic_subword (retval, newval, mask);
23659 emit_store_conditional (orig_mode, cond, mem, x);
23661 if (!is_weak)
23663 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23664 emit_unlikely_jump (x, label1);
23667 if (!is_mm_relaxed (mod_f))
23668 emit_label (XEXP (label2, 0));
23670 rs6000_post_atomic_barrier (mod_s);
23672 if (is_mm_relaxed (mod_f))
23673 emit_label (XEXP (label2, 0));
23675 if (shift)
23676 rs6000_finish_atomic_subword (operands[1], retval, shift);
23677 else if (mode != GET_MODE (operands[1]))
23678 convert_move (operands[1], retval, 1);
23680 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23681 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23682 emit_insn (gen_rtx_SET (boolval, x));
23685 /* Expand an atomic exchange operation. */
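/* The generated loop is roughly (illustrative, SImode, relaxed order):

   .L1:	lwarx   ret,0,mem
	stwcx.  val,0,mem
	bne-    .L1

   with barriers added around it according to the memory model.  */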
23687 void
23688 rs6000_expand_atomic_exchange (rtx operands[])
23690 rtx retval, mem, val, cond;
23691 machine_mode mode;
23692 enum memmodel model;
23693 rtx label, x, mask, shift;
23695 retval = operands[0];
23696 mem = operands[1];
23697 val = operands[2];
23698 model = memmodel_base (INTVAL (operands[3]));
23699 mode = GET_MODE (mem);
23701 mask = shift = NULL_RTX;
23702 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23704 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23706 /* Shift and mask VAL into position within the word. */
23707 val = convert_modes (SImode, mode, val, 1);
23708 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23709 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23711 /* Prepare to adjust the return value. */
23712 retval = gen_reg_rtx (SImode);
23713 mode = SImode;
23716 mem = rs6000_pre_atomic_barrier (mem, model);
23718 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23719 emit_label (XEXP (label, 0));
23721 emit_load_locked (mode, retval, mem);
23723 x = val;
23724 if (mask)
23725 x = rs6000_mask_atomic_subword (retval, val, mask);
23727 cond = gen_reg_rtx (CCmode);
23728 emit_store_conditional (mode, cond, mem, x);
23730 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23731 emit_unlikely_jump (x, label);
23733 rs6000_post_atomic_barrier (model);
23735 if (shift)
23736 rs6000_finish_atomic_subword (operands[0], retval, shift);
23739 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23740 to perform. MEM is the memory on which to operate. VAL is the second
23741 operand of the binary operator. BEFORE and AFTER are optional locations to
23742 return the value of MEM either before or after the operation. MODEL_RTX
23743 is a CONST_INT containing the memory model to use. */
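/* E.g. __atomic_fetch_add on an int becomes, roughly (illustrative):

   .L1:	lwarx   before,0,mem
	add     after,before,val
	stwcx.  after,0,mem
	bne-    .L1

   bracketed by whatever barriers MODEL requires; NOT (i.e. fetch_nand)
   is special-cased below as AND followed by a one's complement.  */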
23745 void
23746 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23747 rtx orig_before, rtx orig_after, rtx model_rtx)
23749 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23750 machine_mode mode = GET_MODE (mem);
23751 machine_mode store_mode = mode;
23752 rtx label, x, cond, mask, shift;
23753 rtx before = orig_before, after = orig_after;
23755 mask = shift = NULL_RTX;
23756 /* On power8, we want to use SImode for the operation. On previous systems,
23757 use the operation in a subword and shift/mask to get the proper byte or
23758 halfword. */
23759 if (mode == QImode || mode == HImode)
23761 if (TARGET_SYNC_HI_QI)
23763 val = convert_modes (SImode, mode, val, 1);
23765 /* Prepare to adjust the return value. */
23766 before = gen_reg_rtx (SImode);
23767 if (after)
23768 after = gen_reg_rtx (SImode);
23769 mode = SImode;
23771 else
23773 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23775 /* Shift and mask VAL into position within the word. */
23776 val = convert_modes (SImode, mode, val, 1);
23777 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23778 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23780 switch (code)
23782 case IOR:
23783 case XOR:
23784 /* We've already zero-extended VAL. That is sufficient to
23785 make certain that it does not affect other bits. */
23786 mask = NULL;
23787 break;
23789 case AND:
23790 /* If we make certain that all of the other bits in VAL are
23791 set, that will be sufficient to not affect other bits. */
23792 x = gen_rtx_NOT (SImode, mask);
23793 x = gen_rtx_IOR (SImode, x, val);
23794 emit_insn (gen_rtx_SET (val, x));
23795 mask = NULL;
23796 break;
23798 case NOT:
23799 case PLUS:
23800 case MINUS:
23801 /* These will all affect bits outside the field and need
23802 adjustment via MASK within the loop. */
23803 break;
23805 default:
23806 gcc_unreachable ();
23809 /* Prepare to adjust the return value. */
23810 before = gen_reg_rtx (SImode);
23811 if (after)
23812 after = gen_reg_rtx (SImode);
23813 store_mode = mode = SImode;
23817 mem = rs6000_pre_atomic_barrier (mem, model);
23819 label = gen_label_rtx ();
23820 emit_label (label);
23821 label = gen_rtx_LABEL_REF (VOIDmode, label);
23823 if (before == NULL_RTX)
23824 before = gen_reg_rtx (mode);
23826 emit_load_locked (mode, before, mem);
23828 if (code == NOT)
23830 x = expand_simple_binop (mode, AND, before, val,
23831 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23832 after = expand_simple_unop (mode, NOT, x, after, 1);
23834 else
23836 after = expand_simple_binop (mode, code, before, val,
23837 after, 1, OPTAB_LIB_WIDEN);
23840 x = after;
23841 if (mask)
23843 x = expand_simple_binop (SImode, AND, after, mask,
23844 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23845 x = rs6000_mask_atomic_subword (before, x, mask);
23847 else if (store_mode != mode)
23848 x = convert_modes (store_mode, mode, x, 1);
23850 cond = gen_reg_rtx (CCmode);
23851 emit_store_conditional (store_mode, cond, mem, x);
23853 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23854 emit_unlikely_jump (x, label);
23856 rs6000_post_atomic_barrier (model);
23858 if (shift)
23860 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23861 then do the calculations in a SImode register. */
23862 if (orig_before)
23863 rs6000_finish_atomic_subword (orig_before, before, shift);
23864 if (orig_after)
23865 rs6000_finish_atomic_subword (orig_after, after, shift);
23867 else if (store_mode != mode)
23869 /* QImode/HImode on machines with lbarx/lharx where we do the native
23870 operation and then do the calculations in a SImode register. */
23871 if (orig_before)
23872 convert_move (orig_before, before, 1);
23873 if (orig_after)
23874 convert_move (orig_after, after, 1);
23876 else if (orig_after && after != orig_after)
23877 emit_move_insn (orig_after, after);
23880 /* Emit instructions to move SRC to DST. Called by splitters for
23881 multi-register moves. It will emit at most one instruction for
23882 each register that is accessed; that is, it won't emit li/lis pairs
23883 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23884 register. */
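/* For example, splitting a TImode move between GPRs in 64-bit mode
   produces two DImode register moves; if the source and destination
   ranges overlap (say r4:r5 -> r3:r4), the loops below order the
   component moves, forwards or backwards, so that no input register is
   clobbered before it is read.  */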
23886 void
23887 rs6000_split_multireg_move (rtx dst, rtx src)
23889 /* The register number of the first register being moved. */
23890 int reg;
23891 /* The mode that is to be moved. */
23892 machine_mode mode;
23893 /* The mode that the move is being done in, and its size. */
23894 machine_mode reg_mode;
23895 int reg_mode_size;
23896 /* The number of registers that will be moved. */
23897 int nregs;
23899 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23900 mode = GET_MODE (dst);
23901 nregs = hard_regno_nregs (reg, mode);
23902 if (FP_REGNO_P (reg))
23903 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23904 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
23905 else if (ALTIVEC_REGNO_P (reg))
23906 reg_mode = V16QImode;
23907 else
23908 reg_mode = word_mode;
23909 reg_mode_size = GET_MODE_SIZE (reg_mode);
23911 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23913 /* TDmode residing in FP registers is special, since the ISA requires that
23914 the lower-numbered word of a register pair is always the most significant
23915 word, even in little-endian mode. This does not match the usual subreg
23916 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23917 the appropriate constituent registers "by hand" in little-endian mode.
23919 Note we do not need to check for destructive overlap here since TDmode
23920 can only reside in even/odd register pairs. */
23921 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23923 rtx p_src, p_dst;
23924 int i;
23926 for (i = 0; i < nregs; i++)
23928 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23929 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23930 else
23931 p_src = simplify_gen_subreg (reg_mode, src, mode,
23932 i * reg_mode_size);
23934 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23935 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23936 else
23937 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23938 i * reg_mode_size);
23940 emit_insn (gen_rtx_SET (p_dst, p_src));
23943 return;
23946 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23948 /* Move register range backwards, if we might have destructive
23949 overlap. */
23950 int i;
23951 for (i = nregs - 1; i >= 0; i--)
23952 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23953 i * reg_mode_size),
23954 simplify_gen_subreg (reg_mode, src, mode,
23955 i * reg_mode_size)));
23957 else
23959 int i;
23960 int j = -1;
23961 bool used_update = false;
23962 rtx restore_basereg = NULL_RTX;
23964 if (MEM_P (src) && INT_REGNO_P (reg))
23966 rtx breg;
23968 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23969 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23971 rtx delta_rtx;
23972 breg = XEXP (XEXP (src, 0), 0);
23973 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23974 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23975 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23976 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23977 src = replace_equiv_address (src, breg);
23979 else if (! rs6000_offsettable_memref_p (src, reg_mode))
23981 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23983 rtx basereg = XEXP (XEXP (src, 0), 0);
23984 if (TARGET_UPDATE)
23986 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23987 emit_insn (gen_rtx_SET (ndst,
23988 gen_rtx_MEM (reg_mode,
23989 XEXP (src, 0))));
23990 used_update = true;
23992 else
23993 emit_insn (gen_rtx_SET (basereg,
23994 XEXP (XEXP (src, 0), 1)));
23995 src = replace_equiv_address (src, basereg);
23997 else
23999 rtx basereg = gen_rtx_REG (Pmode, reg);
24000 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
24001 src = replace_equiv_address (src, basereg);
24005 breg = XEXP (src, 0);
24006 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
24007 breg = XEXP (breg, 0);
24009 /* If the base register we are using to address memory is
24010 also a destination reg, then change that register last. */
24011 if (REG_P (breg)
24012 && REGNO (breg) >= REGNO (dst)
24013 && REGNO (breg) < REGNO (dst) + nregs)
24014 j = REGNO (breg) - REGNO (dst);
24016 else if (MEM_P (dst) && INT_REGNO_P (reg))
24018 rtx breg;
24020 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
24021 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
24023 rtx delta_rtx;
24024 breg = XEXP (XEXP (dst, 0), 0);
24025 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
24026 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
24027 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
24029 /* We have to update the breg before doing the store.
24030 Use store with update, if available. */
24032 if (TARGET_UPDATE)
24034 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24035 emit_insn (TARGET_32BIT
24036 ? (TARGET_POWERPC64
24037 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
24038 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
24039 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
24040 used_update = true;
24042 else
24043 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
24044 dst = replace_equiv_address (dst, breg);
24046 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
24047 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
24049 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
24051 rtx basereg = XEXP (XEXP (dst, 0), 0);
24052 if (TARGET_UPDATE)
24054 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24055 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
24056 XEXP (dst, 0)),
24057 nsrc));
24058 used_update = true;
24060 else
24061 emit_insn (gen_rtx_SET (basereg,
24062 XEXP (XEXP (dst, 0), 1)));
24063 dst = replace_equiv_address (dst, basereg);
24065 else
24067 rtx basereg = XEXP (XEXP (dst, 0), 0);
24068 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
24069 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
24070 && REG_P (basereg)
24071 && REG_P (offsetreg)
24072 && REGNO (basereg) != REGNO (offsetreg));
24073 if (REGNO (basereg) == 0)
24075 rtx tmp = offsetreg;
24076 offsetreg = basereg;
24077 basereg = tmp;
24079 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
24080 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
24081 dst = replace_equiv_address (dst, basereg);
24084 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
24085 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
24088 for (i = 0; i < nregs; i++)
24090 /* Calculate index to next subword. */
24091 ++j;
24092 if (j == nregs)
24093 j = 0;
24095 /* If the compiler already emitted the move of the first word by
24096 a store with update, there is no need to do anything. */
24097 if (j == 0 && used_update)
24098 continue;
24100 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24101 j * reg_mode_size),
24102 simplify_gen_subreg (reg_mode, src, mode,
24103 j * reg_mode_size)));
24105 if (restore_basereg != NULL_RTX)
24106 emit_insn (restore_basereg);
24111 /* This page contains routines that are used to determine what the
24112 function prologue and epilogue code will do and write them out. */
24114 /* Determine whether the REG is really used. */
24116 static bool
24117 save_reg_p (int reg)
24119 /* We need to mark the PIC offset register live for the same conditions
24120 as it is set up, otherwise it won't be saved before we clobber it. */
24122 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24124 /* When calling eh_return, we must return true for all the cases
24125 where conditional_register_usage marks the PIC offset reg
24126 call used. */
24127 if (TARGET_TOC && TARGET_MINIMAL_TOC
24128 && (crtl->calls_eh_return
24129 || df_regs_ever_live_p (reg)
24130 || !constant_pool_empty_p ()))
24131 return true;
24133 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
24134 && flag_pic)
24135 return true;
24138 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
24141 /* Return the first fixed-point register that is required to be
24142 saved. 32 if none. */
24145 first_reg_to_save (void)
24147 int first_reg;
24149 /* Find lowest numbered live register. */
24150 for (first_reg = 13; first_reg <= 31; first_reg++)
24151 if (save_reg_p (first_reg))
24152 break;
24154 #if TARGET_MACHO
24155 if (flag_pic
24156 && crtl->uses_pic_offset_table
24157 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
24158 return RS6000_PIC_OFFSET_TABLE_REGNUM;
24159 #endif
24161 return first_reg;
24164 /* Similar, for FP regs. */
24167 first_fp_reg_to_save (void)
24169 int first_reg;
24171 /* Find lowest numbered live register. */
24172 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24173 if (save_reg_p (first_reg))
24174 break;
24176 return first_reg;
24179 /* Similar, for AltiVec regs. */
24181 static int
24182 first_altivec_reg_to_save (void)
24184 int i;
24186 /* Stack frame remains as is unless we are in AltiVec ABI. */
24187 if (! TARGET_ALTIVEC_ABI)
24188 return LAST_ALTIVEC_REGNO + 1;
24190 /* On Darwin, the unwind routines are compiled without
24191 TARGET_ALTIVEC, and use save_world to save/restore the
24192 altivec registers when necessary. */
24193 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24194 && ! TARGET_ALTIVEC)
24195 return FIRST_ALTIVEC_REGNO + 20;
24197 /* Find lowest numbered live register. */
24198 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24199 if (save_reg_p (i))
24200 break;
24202 return i;
24205 /* Return a 32-bit mask of the AltiVec registers we need to set in
24206 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
24207 the 32-bit word is 0. */
24209 static unsigned int
24210 compute_vrsave_mask (void)
24212 unsigned int i, mask = 0;
24214 /* On Darwin, the unwind routines are compiled without
24215 TARGET_ALTIVEC, and use save_world to save/restore the
24216 call-saved altivec registers when necessary. */
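/* (0xFFF below is V20..V31 in this bit numbering: ALTIVEC_REG_BIT gives
   V0 the most significant bit, so V20 is 0x800 and V31 is 0x1.)  */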
24217 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24218 && ! TARGET_ALTIVEC)
24219 mask |= 0xFFF;
24221 /* First, find out if we use _any_ altivec registers. */
24222 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24223 if (df_regs_ever_live_p (i))
24224 mask |= ALTIVEC_REG_BIT (i);
24226 if (mask == 0)
24227 return mask;
24229 /* Next, remove the argument registers from the set. These must
24230 be in the VRSAVE mask set by the caller, so we don't need to add
24231 them in again. More importantly, the mask we compute here is
24232 used to generate CLOBBERs in the set_vrsave insn, and we do not
24233 wish the argument registers to die. */
24234 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24235 mask &= ~ALTIVEC_REG_BIT (i);
24237 /* Similarly, remove the return value from the set. */
24239 bool yes = false;
24240 diddle_return_value (is_altivec_return_reg, &yes);
24241 if (yes)
24242 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24245 return mask;
24248 /* For a very restricted set of circumstances, we can cut down the
24249 size of prologues/epilogues by calling our own save/restore-the-world
24250 routines. */
24252 static void
24253 compute_save_world_info (rs6000_stack_t *info)
24255 info->world_save_p = 1;
24256 info->world_save_p
24257 = (WORLD_SAVE_P (info)
24258 && DEFAULT_ABI == ABI_DARWIN
24259 && !cfun->has_nonlocal_label
24260 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24261 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24262 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24263 && info->cr_save_p);
24265 /* This will not work in conjunction with sibcalls. Make sure there
24266 are none. (This check is expensive, but seldom executed.) */
24267 if (WORLD_SAVE_P (info))
24269 rtx_insn *insn;
24270 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24271 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24273 info->world_save_p = 0;
24274 break;
24278 if (WORLD_SAVE_P (info))
24280 /* Even if we're not touching VRsave, make sure there's room on the
24281 stack for it, if it looks like we're calling SAVE_WORLD, which
24282 will attempt to save it. */
24283 info->vrsave_size = 4;
24285 /* If we are going to save the world, we need to save the link register too. */
24286 info->lr_save_p = 1;
24288 /* "Save" the VRsave register too if we're saving the world. */
24289 if (info->vrsave_mask == 0)
24290 info->vrsave_mask = compute_vrsave_mask ();
24292 /* Because the Darwin register save/restore routines only handle
24293 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24294 check. */
24295 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24296 && (info->first_altivec_reg_save
24297 >= FIRST_SAVED_ALTIVEC_REGNO));
24300 return;
24304 static void
24305 is_altivec_return_reg (rtx reg, void *xyes)
24307 bool *yes = (bool *) xyes;
24308 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24309 *yes = true;
24313 /* Return whether REG is a global user reg or has been specified by
24314 -ffixed-REG. We should not restore these, and so cannot use
24315 lmw or out-of-line restore functions if there are any. We also
24316 can't save them (well, emit frame notes for them), because frame
24317 unwinding during exception handling will restore saved registers. */
24319 static bool
24320 fixed_reg_p (int reg)
24322 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24323 backend sets it, overriding anything the user might have given. */
24324 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24325 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24326 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24327 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24328 return false;
24330 return fixed_regs[reg];
24333 /* Determine the strategy for saving/restoring registers. */
24335 enum {
24336 SAVE_MULTIPLE = 0x1,
24337 SAVE_INLINE_GPRS = 0x2,
24338 SAVE_INLINE_FPRS = 0x4,
24339 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24340 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24341 SAVE_INLINE_VRS = 0x20,
24342 REST_MULTIPLE = 0x100,
24343 REST_INLINE_GPRS = 0x200,
24344 REST_INLINE_FPRS = 0x400,
24345 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24346 REST_INLINE_VRS = 0x1000
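/* A function's strategy is an inclusive-or of the flags above; for
   example, a 32-bit function saving several gprs may end up with
   (SAVE_INLINE_GPRS | SAVE_MULTIPLE), meaning the gprs are saved inline
   with a single store-multiple instruction.  */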
24349 static int
24350 rs6000_savres_strategy (rs6000_stack_t *info,
24351 bool using_static_chain_p)
24353 int strategy = 0;
24355 /* Select between in-line and out-of-line save and restore of regs.
24356 First, all the obvious cases where we don't use out-of-line. */
24357 if (crtl->calls_eh_return
24358 || cfun->machine->ra_need_lr)
24359 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24360 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24361 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24363 if (info->first_gp_reg_save == 32)
24364 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24366 if (info->first_fp_reg_save == 64
24367 /* The out-of-line FP routines use double-precision stores;
24368 we can't use those routines if we don't have such stores. */
24369 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
24370 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24372 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24373 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24375 /* Define cutoff for using out-of-line functions to save registers. */
24376 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24378 if (!optimize_size)
24380 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24381 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24382 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24384 else
24386 /* Prefer out-of-line restore if it will exit. */
24387 if (info->first_fp_reg_save > 61)
24388 strategy |= SAVE_INLINE_FPRS;
24389 if (info->first_gp_reg_save > 29)
24391 if (info->first_fp_reg_save == 64)
24392 strategy |= SAVE_INLINE_GPRS;
24393 else
24394 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24396 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24397 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24400 else if (DEFAULT_ABI == ABI_DARWIN)
24402 if (info->first_fp_reg_save > 60)
24403 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24404 if (info->first_gp_reg_save > 29)
24405 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24406 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24408 else
24410 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24411 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24412 || info->first_fp_reg_save > 61)
24413 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24414 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24415 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24418 /* Don't bother to try to save things out-of-line if r11 is occupied
24419 by the static chain. It would require too much fiddling and the
24420 static chain is rarely used anyway. FPRs are saved w.r.t the stack
24421 pointer on Darwin, and AIX uses r1 or r12. */
24422 if (using_static_chain_p
24423 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24424 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24425 | SAVE_INLINE_GPRS
24426 | SAVE_INLINE_VRS);
24428 /* Don't ever restore fixed regs. That means we can't use the
24429 out-of-line register restore functions if a fixed reg is in the
24430 range of regs restored. */
24431 if (!(strategy & REST_INLINE_FPRS))
24432 for (int i = info->first_fp_reg_save; i < 64; i++)
24433 if (fixed_regs[i])
24435 strategy |= REST_INLINE_FPRS;
24436 break;
24439 /* We can only use the out-of-line routines to restore fprs if we've
24440 saved all the registers from first_fp_reg_save in the prologue.
24441 Otherwise, we risk loading garbage. Of course, if we have saved
24442 out-of-line then we know we haven't skipped any fprs. */
24443 if ((strategy & SAVE_INLINE_FPRS)
24444 && !(strategy & REST_INLINE_FPRS))
24445 for (int i = info->first_fp_reg_save; i < 64; i++)
24446 if (!save_reg_p (i))
24448 strategy |= REST_INLINE_FPRS;
24449 break;
24452 /* Similarly, for altivec regs. */
24453 if (!(strategy & REST_INLINE_VRS))
24454 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24455 if (fixed_regs[i])
24457 strategy |= REST_INLINE_VRS;
24458 break;
24461 if ((strategy & SAVE_INLINE_VRS)
24462 && !(strategy & REST_INLINE_VRS))
24463 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24464 if (!save_reg_p (i))
24466 strategy |= REST_INLINE_VRS;
24467 break;
24470 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24471 saved is an out-of-line save or restore. Set up the value for
24472 the next test (excluding out-of-line gprs). */
24473 bool lr_save_p = (info->lr_save_p
24474 || !(strategy & SAVE_INLINE_FPRS)
24475 || !(strategy & SAVE_INLINE_VRS)
24476 || !(strategy & REST_INLINE_FPRS)
24477 || !(strategy & REST_INLINE_VRS));
24479 if (TARGET_MULTIPLE
24480 && !TARGET_POWERPC64
24481 && info->first_gp_reg_save < 31
24482 && !(flag_shrink_wrap
24483 && flag_shrink_wrap_separate
24484 && optimize_function_for_speed_p (cfun)))
24486 int count = 0;
24487 for (int i = info->first_gp_reg_save; i < 32; i++)
24488 if (save_reg_p (i))
24489 count++;
24491 if (count <= 1)
24492 /* Don't use store multiple if only one reg needs to be
24493 saved. This can occur for example when the ABI_V4 pic reg
24494 (r30) needs to be saved to make calls, but r31 is not
24495 used. */
24496 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24497 else
24499 /* Prefer store multiple for saves over out-of-line
24500 routines, since the store-multiple instruction will
24501 always be smaller. */
24502 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24504 /* The situation is more complicated with load multiple.
24505 We'd prefer to use the out-of-line routines for restores,
24506 since the "exit" out-of-line routines can handle the
24507 restore of LR and the frame teardown. However it doesn't
24508 make sense to use the out-of-line routine if that is the
24509 only reason we'd need to save LR, and we can't use the
24510 "exit" out-of-line gpr restore if we have saved some
24511 fprs. In those cases it is advantageous to use load
24512 multiple when available. */
24513 if (info->first_fp_reg_save != 64 || !lr_save_p)
24514 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24518 /* Using the "exit" out-of-line routine does not improve code size
24519 if using it would require lr to be saved and we are only saving one
24520 or two gprs. */
24521 else if (!lr_save_p && info->first_gp_reg_save > 29)
24522 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24524 /* Don't ever restore fixed regs. */
24525 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24526 for (int i = info->first_gp_reg_save; i < 32; i++)
24527 if (fixed_reg_p (i))
24529 strategy |= REST_INLINE_GPRS;
24530 strategy &= ~REST_MULTIPLE;
24531 break;
24534 /* We can only use load multiple or the out-of-line routines to
24535 restore gprs if we've saved all the registers from
24536 first_gp_reg_save. Otherwise, we risk loading garbage.
24537 Of course, if we have saved out-of-line or used stmw then we know
24538 we haven't skipped any gprs. */
24539 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24540 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24541 for (int i = info->first_gp_reg_save; i < 32; i++)
24542 if (!save_reg_p (i))
24544 strategy |= REST_INLINE_GPRS;
24545 strategy &= ~REST_MULTIPLE;
24546 break;
24549 if (TARGET_ELF && TARGET_64BIT)
24551 if (!(strategy & SAVE_INLINE_FPRS))
24552 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24553 else if (!(strategy & SAVE_INLINE_GPRS)
24554 && info->first_fp_reg_save == 64)
24555 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24557 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24558 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24560 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24561 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24563 return strategy;
24566 /* Calculate the stack information for the current function. This is
24567 complicated by having two separate calling sequences, the AIX calling
24568 sequence and the V.4 calling sequence.
24570 AIX (and Darwin/Mac OS X) stack frames look like:
24571 32-bit 64-bit
24572 SP----> +---------------------------------------+
24573 | back chain to caller | 0 0
24574 +---------------------------------------+
24575 | saved CR | 4 8 (8-11)
24576 +---------------------------------------+
24577 | saved LR | 8 16
24578 +---------------------------------------+
24579 | reserved for compilers | 12 24
24580 +---------------------------------------+
24581 | reserved for binders | 16 32
24582 +---------------------------------------+
24583 | saved TOC pointer | 20 40
24584 +---------------------------------------+
24585 | Parameter save area (+padding*) (P) | 24 48
24586 +---------------------------------------+
24587 | Alloca space (A) | 24+P etc.
24588 +---------------------------------------+
24589 | Local variable space (L) | 24+P+A
24590 +---------------------------------------+
24591 | Float/int conversion temporary (X) | 24+P+A+L
24592 +---------------------------------------+
24593 | Save area for AltiVec registers (W) | 24+P+A+L+X
24594 +---------------------------------------+
24595 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24596 +---------------------------------------+
24597 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24598 +---------------------------------------+
24599 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24600 +---------------------------------------+
24601 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24602 +---------------------------------------+
24603 old SP->| back chain to caller's caller |
24604 +---------------------------------------+
24606 * If the alloca area is present, the parameter save area is
24607 padded so that the former starts 16-byte aligned.
24609 The required alignment for AIX configurations is two words (i.e., 8
24610 or 16 bytes).
24612 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24614 SP----> +---------------------------------------+
24615 | Back chain to caller | 0
24616 +---------------------------------------+
24617 | Save area for CR | 8
24618 +---------------------------------------+
24619 | Saved LR | 16
24620 +---------------------------------------+
24621 | Saved TOC pointer | 24
24622 +---------------------------------------+
24623 | Parameter save area (+padding*) (P) | 32
24624 +---------------------------------------+
24625 | Alloca space (A) | 32+P
24626 +---------------------------------------+
24627 | Local variable space (L) | 32+P+A
24628 +---------------------------------------+
24629 | Save area for AltiVec registers (W) | 32+P+A+L
24630 +---------------------------------------+
24631 | AltiVec alignment padding (Y) | 32+P+A+L+W
24632 +---------------------------------------+
24633 | Save area for GP registers (G) | 32+P+A+L+W+Y
24634 +---------------------------------------+
24635 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24636 +---------------------------------------+
24637 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24638 +---------------------------------------+
24640 * If the alloca area is present, the parameter save area is
24641 padded so that the former starts 16-byte aligned.
24643 V.4 stack frames look like:
24645 SP----> +---------------------------------------+
24646 | back chain to caller | 0
24647 +---------------------------------------+
24648 | caller's saved LR | 4
24649 +---------------------------------------+
24650 | Parameter save area (+padding*) (P) | 8
24651 +---------------------------------------+
24652 | Alloca space (A) | 8+P
24653 +---------------------------------------+
24654 | Varargs save area (V) | 8+P+A
24655 +---------------------------------------+
24656 | Local variable space (L) | 8+P+A+V
24657 +---------------------------------------+
24658 | Float/int conversion temporary (X) | 8+P+A+V+L
24659 +---------------------------------------+
24660 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24661 +---------------------------------------+
24662 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24663 +---------------------------------------+
24664 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24665 +---------------------------------------+
24666 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24667 +---------------------------------------+
24668 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24669 +---------------------------------------+
24670 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24671 +---------------------------------------+
24672 old SP->| back chain to caller's caller |
24673 +---------------------------------------+
24675 * If the alloca area is present and the required alignment is
24676 16 bytes, the parameter save area is padded so that the
24677 alloca area starts 16-byte aligned.
24679 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24680 given. (But note below and in sysv4.h that we require only 8 and
24681 may round up the size of our stack frame anyway. The historical
24682 reason is early versions of powerpc-linux which didn't properly
24683 align the stack at program startup. A happy side-effect is that
24684 -mno-eabi libraries can be used with -meabi programs.)
24686 The EABI configuration defaults to the V.4 layout. However,
24687 the stack alignment requirements may differ. If -mno-eabi is not
24688 given, the required stack alignment is 8 bytes; if -mno-eabi is
24689 given, the required alignment is 16 bytes. (But see V.4 comment
24690 above.) */
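/* As a small worked example of the offset computations below: a 64-bit
   AIX/ELFv2 function that saves f30-f31 and r30-r31 has
   fp_size = 8 * 2 = 16 and gp_size = 8 * 2 = 16, giving
   fp_save_offset = -16 and gp_save_offset = -32 relative to the top of
   the frame.  */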
24692 #ifndef ABI_STACK_BOUNDARY
24693 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24694 #endif
24696 static rs6000_stack_t *
24697 rs6000_stack_info (void)
24699 /* We should never be called for thunks; we are not set up for that. */
24700 gcc_assert (!cfun->is_thunk);
24702 rs6000_stack_t *info = &stack_info;
24703 int reg_size = TARGET_32BIT ? 4 : 8;
24704 int ehrd_size;
24705 int ehcr_size;
24706 int save_align;
24707 int first_gp;
24708 HOST_WIDE_INT non_fixed_size;
24709 bool using_static_chain_p;
24711 if (reload_completed && info->reload_completed)
24712 return info;
24714 memset (info, 0, sizeof (*info));
24715 info->reload_completed = reload_completed;
24717 /* Select which calling sequence. */
24718 info->abi = DEFAULT_ABI;
24720 /* Calculate which registers need to be saved & save area size. */
24721 info->first_gp_reg_save = first_reg_to_save ();
24722 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24723 even if it currently looks like we won't. Reload may need it to
24724 get at a constant; if so, it will have already created a constant
24725 pool entry for it. */
24726 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24727 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24728 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24729 && crtl->uses_const_pool
24730 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24731 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24732 else
24733 first_gp = info->first_gp_reg_save;
24735 info->gp_size = reg_size * (32 - first_gp);
24737 info->first_fp_reg_save = first_fp_reg_to_save ();
24738 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24740 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24741 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24742 - info->first_altivec_reg_save);
24744 /* Does this function call anything? */
24745 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24747 /* Determine if we need to save the condition code registers. */
24748 if (save_reg_p (CR2_REGNO)
24749 || save_reg_p (CR3_REGNO)
24750 || save_reg_p (CR4_REGNO))
24752 info->cr_save_p = 1;
24753 if (DEFAULT_ABI == ABI_V4)
24754 info->cr_size = reg_size;
24757 /* If the current function calls __builtin_eh_return, then we need
24758 to allocate stack space for registers that will hold data for
24759 the exception handler. */
24760 if (crtl->calls_eh_return)
24762 unsigned int i;
24763 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24764 continue;
24766 ehrd_size = i * UNITS_PER_WORD;
24768 else
24769 ehrd_size = 0;
24771 /* In the ELFv2 ABI, we also need to allocate space for separate
24772 CR field save areas if the function calls __builtin_eh_return. */
24773 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24775 /* This hard-codes that we have three call-saved CR fields. */
24776 ehcr_size = 3 * reg_size;
24777 /* We do *not* use the regular CR save mechanism. */
24778 info->cr_save_p = 0;
24780 else
24781 ehcr_size = 0;
24783 /* Determine various sizes. */
24784 info->reg_size = reg_size;
24785 info->fixed_size = RS6000_SAVE_AREA;
24786 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24787 if (cfun->calls_alloca)
24788 info->parm_size =
24789 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24790 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24791 else
24792 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24793 TARGET_ALTIVEC ? 16 : 8);
24794 if (FRAME_GROWS_DOWNWARD)
24795 info->vars_size
24796 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24797 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24798 - (info->fixed_size + info->vars_size + info->parm_size);
24800 if (TARGET_ALTIVEC_ABI)
24801 info->vrsave_mask = compute_vrsave_mask ();
24803 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24804 info->vrsave_size = 4;
24806 compute_save_world_info (info);
24808 /* Calculate the offsets. */
24809 switch (DEFAULT_ABI)
24811 case ABI_NONE:
24812 default:
24813 gcc_unreachable ();
24815 case ABI_AIX:
24816 case ABI_ELFv2:
24817 case ABI_DARWIN:
24818 info->fp_save_offset = -info->fp_size;
24819 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24821 if (TARGET_ALTIVEC_ABI)
24823 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24825 /* Align stack so vector save area is on a quadword boundary.
24826 The padding goes above the vectors. */
24827 if (info->altivec_size != 0)
24828 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24830 info->altivec_save_offset = info->vrsave_save_offset
24831 - info->altivec_padding_size
24832 - info->altivec_size;
24833 gcc_assert (info->altivec_size == 0
24834 || info->altivec_save_offset % 16 == 0);
24836 /* Adjust for AltiVec case. */
24837 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24839 else
24840 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24842 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24843 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24844 info->lr_save_offset = 2*reg_size;
24845 break;
24847 case ABI_V4:
24848 info->fp_save_offset = -info->fp_size;
24849 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24850 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24852 if (TARGET_ALTIVEC_ABI)
24854 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24856 /* Align stack so vector save area is on a quadword boundary. */
24857 if (info->altivec_size != 0)
24858 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24860 info->altivec_save_offset = info->vrsave_save_offset
24861 - info->altivec_padding_size
24862 - info->altivec_size;
24864 /* Adjust for AltiVec case. */
24865 info->ehrd_offset = info->altivec_save_offset;
24867 else
24868 info->ehrd_offset = info->cr_save_offset;
24870 info->ehrd_offset -= ehrd_size;
24871 info->lr_save_offset = reg_size;
24874 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24875 info->save_size = RS6000_ALIGN (info->fp_size
24876 + info->gp_size
24877 + info->altivec_size
24878 + info->altivec_padding_size
24879 + ehrd_size
24880 + ehcr_size
24881 + info->cr_size
24882 + info->vrsave_size,
24883 save_align);
24885 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24887 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24888 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24890 /* Determine if we need to save the link register. */
24891 if (info->calls_p
24892 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24893 && crtl->profile
24894 && !TARGET_PROFILE_KERNEL)
24895 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24896 #ifdef TARGET_RELOCATABLE
24897 || (DEFAULT_ABI == ABI_V4
24898 && (TARGET_RELOCATABLE || flag_pic > 1)
24899 && !constant_pool_empty_p ())
24900 #endif
24901 || rs6000_ra_ever_killed ())
24902 info->lr_save_p = 1;
24904 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24905 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24906 && call_used_regs[STATIC_CHAIN_REGNUM]);
24907 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24909 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24910 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24911 || !(info->savres_strategy & SAVE_INLINE_VRS)
24912 || !(info->savres_strategy & REST_INLINE_GPRS)
24913 || !(info->savres_strategy & REST_INLINE_FPRS)
24914 || !(info->savres_strategy & REST_INLINE_VRS))
24915 info->lr_save_p = 1;
24917 if (info->lr_save_p)
24918 df_set_regs_ever_live (LR_REGNO, true);
24920 /* Determine if we need to allocate any stack frame:
24922 For AIX we need to push the stack if a frame pointer is needed
24923 (because the stack might be dynamically adjusted), if we are
24924 debugging, if we make calls, or if the sum of fp_save, gp_save,
24925 and local variables is more than the space needed to save all
24926 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24927 + 18*8 = 288 (GPR13 reserved).
24929 For V.4 we don't have the stack cushion that AIX uses, but assume
24930 that the debugger can handle stackless frames. */
24932 if (info->calls_p)
24933 info->push_p = 1;
24935 else if (DEFAULT_ABI == ABI_V4)
24936 info->push_p = non_fixed_size != 0;
24938 else if (frame_pointer_needed)
24939 info->push_p = 1;
24941 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24942 info->push_p = 1;
24944 else
24945 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24947 return info;
24950 static void
24951 debug_stack_info (rs6000_stack_t *info)
24953 const char *abi_string;
24955 if (! info)
24956 info = rs6000_stack_info ();
24958 fprintf (stderr, "\nStack information for function %s:\n",
24959 ((current_function_decl && DECL_NAME (current_function_decl))
24960 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24961 : "<unknown>"));
24963 switch (info->abi)
24965 default: abi_string = "Unknown"; break;
24966 case ABI_NONE: abi_string = "NONE"; break;
24967 case ABI_AIX: abi_string = "AIX"; break;
24968 case ABI_ELFv2: abi_string = "ELFv2"; break;
24969 case ABI_DARWIN: abi_string = "Darwin"; break;
24970 case ABI_V4: abi_string = "V.4"; break;
24973 fprintf (stderr, "\tABI = %5s\n", abi_string);
24975 if (TARGET_ALTIVEC_ABI)
24976 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24978 if (info->first_gp_reg_save != 32)
24979 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24981 if (info->first_fp_reg_save != 64)
24982 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24984 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24985 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24986 info->first_altivec_reg_save);
24988 if (info->lr_save_p)
24989 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24991 if (info->cr_save_p)
24992 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24994 if (info->vrsave_mask)
24995 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24997 if (info->push_p)
24998 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
25000 if (info->calls_p)
25001 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
25003 if (info->gp_size)
25004 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
25006 if (info->fp_size)
25007 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
25009 if (info->altivec_size)
25010 fprintf (stderr, "\taltivec_save_offset = %5d\n",
25011 info->altivec_save_offset);
25013 if (info->vrsave_size)
25014 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
25015 info->vrsave_save_offset);
25017 if (info->lr_save_p)
25018 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
25020 if (info->cr_save_p)
25021 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
25023 if (info->varargs_save_offset)
25024 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
25026 if (info->total_size)
25027 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25028 info->total_size);
25030 if (info->vars_size)
25031 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25032 info->vars_size);
25034 if (info->parm_size)
25035 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
25037 if (info->fixed_size)
25038 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
25040 if (info->gp_size)
25041 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
25043 if (info->fp_size)
25044 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
25046 if (info->altivec_size)
25047 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
25049 if (info->vrsave_size)
25050 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
25052 if (info->altivec_padding_size)
25053 fprintf (stderr, "\taltivec_padding_size= %5d\n",
25054 info->altivec_padding_size);
25056 if (info->cr_size)
25057 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
25059 if (info->save_size)
25060 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
25062 if (info->reg_size != 4)
25063 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
25065 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
25067 fprintf (stderr, "\n");
25071 rs6000_return_addr (int count, rtx frame)
25073 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
25074 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
25075 if (count != 0
25076 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
25078 cfun->machine->ra_needs_full_frame = 1;
25080 if (count == 0)
25081 /* FRAME is set to frame_pointer_rtx by the generic code, but that
25082 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
25083 frame = stack_pointer_rtx;
25084 rtx prev_frame_addr = memory_address (Pmode, frame);
25085 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
25086 rtx lr_save_off = plus_constant (Pmode,
25087 prev_frame, RETURN_ADDRESS_OFFSET);
25088 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
25089 return gen_rtx_MEM (Pmode, lr_save_addr);
25092 cfun->machine->ra_need_lr = 1;
25093 return get_hard_reg_initial_val (Pmode, LR_REGNO);
25096 /* Say whether a function is a candidate for sibcall handling or not. */
25098 static bool
25099 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25101 tree fntype;
25103 if (decl)
25104 fntype = TREE_TYPE (decl);
25105 else
25106 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25108 /* We can't do it if the called function has more vector parameters
25109 than the current function; there's nowhere to put the VRsave code. */
25110 if (TARGET_ALTIVEC_ABI
25111 && TARGET_ALTIVEC_VRSAVE
25112 && !(decl && decl == current_function_decl))
25114 function_args_iterator args_iter;
25115 tree type;
25116 int nvreg = 0;
25118 /* Functions with vector parameters are required to have a
25119 prototype, so the argument type info must be available
25120 here. */
25121 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
25122 if (TREE_CODE (type) == VECTOR_TYPE
25123 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25124 nvreg++;
25126 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
25127 if (TREE_CODE (type) == VECTOR_TYPE
25128 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25129 nvreg--;
25131 if (nvreg > 0)
25132 return false;
25135 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25136 functions, because the callee may have a different TOC pointer from
25137 the caller and there's no way to ensure we restore the TOC when
25138 we return. With the secure-plt SYSV ABI we can't make non-local
25139 calls when -fpic/PIC because the plt call stubs use r30. */
25140 if (DEFAULT_ABI == ABI_DARWIN
25141 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25142 && decl
25143 && !DECL_EXTERNAL (decl)
25144 && !DECL_WEAK (decl)
25145 && (*targetm.binds_local_p) (decl))
25146 || (DEFAULT_ABI == ABI_V4
25147 && (!TARGET_SECURE_PLT
25148 || !flag_pic
25149 || (decl
25150 && (*targetm.binds_local_p) (decl)))))
25152 tree attr_list = TYPE_ATTRIBUTES (fntype);
25154 if (!lookup_attribute ("longcall", attr_list)
25155 || lookup_attribute ("shortcall", attr_list))
25156 return true;
25159 return false;
25162 static int
25163 rs6000_ra_ever_killed (void)
25165 rtx_insn *top;
25166 rtx reg;
25167 rtx_insn *insn;
25169 if (cfun->is_thunk)
25170 return 0;
25172 if (cfun->machine->lr_save_state)
25173 return cfun->machine->lr_save_state - 1;
25175 /* regs_ever_live has LR marked as used if any sibcalls are present,
25176 but this should not force saving and restoring in the
25177 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25178 clobbers LR, so that is inappropriate. */
25180 /* Also, the prologue can generate a store into LR that
25181 doesn't really count, like this:
25183 move LR->R0
25184 bcl to set PIC register
25185 move LR->R31
25186 move R0->LR
25188 When we're called from the epilogue, we need to avoid counting
25189 this as a store. */
25191 push_topmost_sequence ();
25192 top = get_insns ();
25193 pop_topmost_sequence ();
25194 reg = gen_rtx_REG (Pmode, LR_REGNO);
25196 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25198 if (INSN_P (insn))
25200 if (CALL_P (insn))
25202 if (!SIBLING_CALL_P (insn))
25203 return 1;
25205 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25206 return 1;
25207 else if (set_of (reg, insn) != NULL_RTX
25208 && !prologue_epilogue_contains (insn))
25209 return 1;
25212 return 0;
25215 /* Emit instructions needed to load the TOC register.
25216 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
25217 a constant pool; or for SVR4 -fpic. */
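/* For the 32-bit SVR4 secure-plt -fpic case below, the generated code
   is roughly (illustrative; label and symbol names are schematic):

	bcl 20,31,.LCF0
   .LCF0:
	mflr 30
	addis 30,30,.LCTOC1-.LCF0@ha
	addi 30,30,.LCTOC1-.LCF0@l

   i.e. a PC-relative computation of the GOT/TOC address using the link
   register, which is why callers must cope with LR being clobbered
   here.  */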
25219 void
25220 rs6000_emit_load_toc_table (int fromprolog)
25222 rtx dest;
25223 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25225 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25227 char buf[30];
25228 rtx lab, tmp1, tmp2, got;
25230 lab = gen_label_rtx ();
25231 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25232 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25233 if (flag_pic == 2)
25235 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25236 need_toc_init = 1;
25238 else
25239 got = rs6000_got_sym ();
25240 tmp1 = tmp2 = dest;
25241 if (!fromprolog)
25243 tmp1 = gen_reg_rtx (Pmode);
25244 tmp2 = gen_reg_rtx (Pmode);
25246 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25247 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25248 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25249 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25251 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25253 emit_insn (gen_load_toc_v4_pic_si ());
25254 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25256 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25258 char buf[30];
25259 rtx temp0 = (fromprolog
25260 ? gen_rtx_REG (Pmode, 0)
25261 : gen_reg_rtx (Pmode));
25263 if (fromprolog)
25265 rtx symF, symL;
25267 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25268 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25270 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25271 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25273 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25274 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25275 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25277 else
25279 rtx tocsym, lab;
25281 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25282 need_toc_init = 1;
25283 lab = gen_label_rtx ();
25284 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25285 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25286 if (TARGET_LINK_STACK)
25287 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25288 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25290 emit_insn (gen_addsi3 (dest, temp0, dest));
25292 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25294 /* This is for AIX code running in non-PIC ELF32. */
25295 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25297 need_toc_init = 1;
25298 emit_insn (gen_elf_high (dest, realsym));
25299 emit_insn (gen_elf_low (dest, dest, realsym));
25301 else
25303 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25305 if (TARGET_32BIT)
25306 emit_insn (gen_load_toc_aix_si (dest));
25307 else
25308 emit_insn (gen_load_toc_aix_di (dest));
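
/* As a rough sketch of the output (illustrative; the exact labels and
   insns come from the rs6000.md patterns named above), the V4
   secure-plt -fpic path materializes the GOT pointer roughly as:

     bcl 20,31,.LCF0       # load_toc_v4_PIC_1: LR <- .LCF0
   .LCF0:
     mflr 30               # tmp1 <- LR
     addis 30,30,_GLOBAL_OFFSET_TABLE_-.LCF0@ha
     addi 30,30,_GLOBAL_OFFSET_TABLE_-.LCF0@l

   while the AIX/ELFv2 case expands to a single load_toc_aix_{si,di}
   insn.  */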
25312 /* Emit instructions to restore the link register after determining where
25313 its value has been stored. */
25315 void
25316 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25318 rs6000_stack_t *info = rs6000_stack_info ();
25319 rtx operands[2];
25321 operands[0] = source;
25322 operands[1] = scratch;
25324 if (info->lr_save_p)
25326 rtx frame_rtx = stack_pointer_rtx;
25327 HOST_WIDE_INT sp_offset = 0;
25328 rtx tmp;
25330 if (frame_pointer_needed
25331 || cfun->calls_alloca
25332 || info->total_size > 32767)
25334 tmp = gen_frame_mem (Pmode, frame_rtx);
25335 emit_move_insn (operands[1], tmp);
25336 frame_rtx = operands[1];
25338 else if (info->push_p)
25339 sp_offset = info->total_size;
25341 tmp = plus_constant (Pmode, frame_rtx,
25342 info->lr_save_offset + sp_offset);
25343 tmp = gen_frame_mem (Pmode, tmp);
25344 emit_move_insn (tmp, operands[0]);
25346 else
25347 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25349 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25350 state of lr_save_p so any change from here on would be a bug. In
25351 particular, stop rs6000_ra_ever_killed from considering the SET
25352 of lr we may have added just above. */
25353 cfun->machine->lr_save_state = info->lr_save_p + 1;
25356 static GTY(()) alias_set_type set = -1;
25358 alias_set_type
25359 get_TOC_alias_set (void)
25361 if (set == -1)
25362 set = new_alias_set ();
25363 return set;
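
/* Callers typically apply this to TOC memory, e.g.

     set_mem_alias_set (mem, get_TOC_alias_set ());

   so that all TOC references share one alias set, disjoint from user
   memory (a usage sketch, not a complete list of call sites).  */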
25366 /* This returns nonzero if the current function uses the TOC. This is
25367 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25368 is generated by the ABI_V4 load_toc_* patterns.
25369 Return 2 instead of 1 if the load_toc_* pattern is in the function
25370 partition that doesn't start the function. */
25371 #if TARGET_ELF
25372 static int
25373 uses_TOC (void)
25375 rtx_insn *insn;
25376 int ret = 1;
25378 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25380 if (INSN_P (insn))
25382 rtx pat = PATTERN (insn);
25383 int i;
25385 if (GET_CODE (pat) == PARALLEL)
25386 for (i = 0; i < XVECLEN (pat, 0); i++)
25388 rtx sub = XVECEXP (pat, 0, i);
25389 if (GET_CODE (sub) == USE)
25391 sub = XEXP (sub, 0);
25392 if (GET_CODE (sub) == UNSPEC
25393 && XINT (sub, 1) == UNSPEC_TOC)
25394 return ret;
25398 else if (crtl->has_bb_partition
25399 && NOTE_P (insn)
25400 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25401 ret = 2;
25403 return 0;
25405 #endif
25407 rtx
25408 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25410 rtx tocrel, tocreg, hi;
25412 if (TARGET_DEBUG_ADDR)
25414 if (GET_CODE (symbol) == SYMBOL_REF)
25415 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25416 XSTR (symbol, 0));
25417 else
25419 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25420 GET_RTX_NAME (GET_CODE (symbol)));
25421 debug_rtx (symbol);
25425 if (!can_create_pseudo_p ())
25426 df_set_regs_ever_live (TOC_REGISTER, true);
25428 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25429 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25430 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25431 return tocrel;
25433 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25434 if (largetoc_reg != NULL)
25436 emit_move_insn (largetoc_reg, hi);
25437 hi = largetoc_reg;
25439 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
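
/* Schematically (a sketch derived from the code above), the returned
   rtx is

     (unspec:P [(symbol_ref "sym") (reg:P 2)] UNSPEC_TOCREL)

   for the small code model, and for the medium/large code models

     (lo_sum:P (high:P (unspec ... UNSPEC_TOCREL))
               (unspec ... UNSPEC_TOCREL))

   with the HIGH part optionally staged through LARGETOC_REG.  */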
25442 /* Issue assembly directives that create a reference to the given DWARF
25443 FRAME_TABLE_LABEL from the current function section. */
25444 void
25445 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25447 fprintf (asm_out_file, "\t.ref %s\n",
25448 (* targetm.strip_name_encoding) (frame_table_label));
25451 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25452 and the change to the stack pointer. */
25454 static void
25455 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25457 rtvec p;
25458 int i;
25459 rtx regs[3];
25461 i = 0;
25462 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25463 if (hard_frame_needed)
25464 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25465 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25466 || (hard_frame_needed
25467 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25468 regs[i++] = fp;
25470 p = rtvec_alloc (i);
25471 while (--i >= 0)
25473 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25474 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25477 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
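
/* The emitted tie is, schematically,

     (parallel [(set (mem:BLK (reg 1)) (const_int 0)) ...])

   one BLKmode frame store per register; it generates no machine code,
   but it stops the scheduler from moving frame accesses across the
   stack pointer update (a sketch of the RTL shape).  */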
25480 /* Allocate SIZE_INT bytes on the stack using a store-with-update style insn
25481 and set the appropriate attributes for the generated insn. Return the
25482 first insn which adjusts the stack pointer or the last insn before
25483 the stack adjustment loop.
25485 SIZE_INT is used to create the CFI note for the allocation.
25487 SIZE_RTX is an rtx containing the size of the adjustment. Note that
25488 since stacks grow to lower addresses its runtime value is -SIZE_INT.
25490 ORIG_SP contains the backchain value that must be stored at *sp. */
25492 static rtx_insn *
25493 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25495 rtx_insn *insn;
25497 rtx size_rtx = GEN_INT (-size_int);
25498 if (size_int > 32767)
25500 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25501 /* Need a note here so that try_split doesn't get confused. */
25502 if (get_last_insn () == NULL_RTX)
25503 emit_note (NOTE_INSN_DELETED);
25504 insn = emit_move_insn (tmp_reg, size_rtx);
25505 try_split (PATTERN (insn), insn, 0);
25506 size_rtx = tmp_reg;
25509 if (Pmode == SImode)
25510 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25511 stack_pointer_rtx,
25512 size_rtx,
25513 orig_sp));
25514 else
25515 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
25516 stack_pointer_rtx,
25517 size_rtx,
25518 orig_sp));
25519 rtx par = PATTERN (insn);
25520 gcc_assert (GET_CODE (par) == PARALLEL);
25521 rtx set = XVECEXP (par, 0, 0);
25522 gcc_assert (GET_CODE (set) == SET);
25523 rtx mem = SET_DEST (set);
25524 gcc_assert (MEM_P (mem));
25525 MEM_NOTRAP_P (mem) = 1;
25526 set_mem_alias_set (mem, get_frame_alias_set ());
25528 RTX_FRAME_RELATED_P (insn) = 1;
25529 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25530 gen_rtx_SET (stack_pointer_rtx,
25531 gen_rtx_PLUS (Pmode,
25532 stack_pointer_rtx,
25533 GEN_INT (-size_int))));
25535 /* Emit a blockage to ensure the allocation/probing insns are
25536 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25537 note for similar reasons. */
25538 if (flag_stack_clash_protection)
25540 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25541 emit_insn (gen_blockage ());
25544 return insn;
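
/* On 64-bit targets the store-with-update above assembles to roughly

     stdu r1,-SIZE(r1)        # SIZE <= 32767

   (stwu for 32-bit), allocating the frame and storing ORIG_SP as the
   backchain in one instruction; for larger sizes -SIZE is first loaded
   into r0 and the indexed update form is used instead.  This is an
   illustrative sketch of the expected output.  */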
25547 static HOST_WIDE_INT
25548 get_stack_clash_protection_probe_interval (void)
25550 return (HOST_WIDE_INT_1U
25551 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25554 static HOST_WIDE_INT
25555 get_stack_clash_protection_guard_size (void)
25557 return (HOST_WIDE_INT_1U
25558 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
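
/* Both are powers of two taken from --param values; e.g. assuming the
   common default of 12 for stack-clash-protection-probe-interval, the
   probe interval is 1 << 12 = 4096 bytes, i.e. one 4 KiB page.  */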
25561 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25562 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25564 COPY_REG, if non-null, should contain a copy of the original
25565 stack pointer at exit from this function.
25567 This is subtly different than the Ada probing in that it tries hard to
25568 prevent attacks that jump the stack guard. Thus it is never allowed to
25569 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25570 space without a suitable probe. */
25571 static rtx_insn *
25572 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25573 rtx copy_reg)
25575 rtx orig_sp = copy_reg;
25577 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25579 /* Round the size down to a multiple of PROBE_INTERVAL. */
25580 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25582 /* If explicitly requested,
25583 or the rounded size is not the same as the original size,
25584 or the rounded size is greater than the probe interval,
25585 then we will need a copy of the original stack pointer. */
25586 if (rounded_size != orig_size
25587 || rounded_size > probe_interval
25588 || copy_reg)
25590 /* If the caller did not request a copy of the incoming stack
25591 pointer, then we use r0 to hold the copy. */
25592 if (!copy_reg)
25593 orig_sp = gen_rtx_REG (Pmode, 0);
25594 emit_move_insn (orig_sp, stack_pointer_rtx);
25597 /* There are three cases here.
25599 One is a single probe which is the most common and most efficiently
25600 implemented as it does not have to have a copy of the original
25601 stack pointer if there are no residuals.
25603 Second is unrolled allocation/probes which we use if there's just
25604 a few of them. It needs to save the original stack pointer into a
25605 temporary for use as a source register in the allocation/probe.
25607 Last is a loop. This is the most uncommon case and least efficient. */
25608 rtx_insn *retval = NULL;
25609 if (rounded_size == probe_interval)
25611 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25613 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25615 else if (rounded_size <= 8 * probe_interval)
25617 /* The ABI requires using the store with update insns to allocate
25618 space and store the backchain into the stack.
25620 So we save the current stack pointer into a temporary, then
25621 emit the store-with-update insns to store the saved stack pointer
25622 into the right location in each new page. */
25623 for (int i = 0; i < rounded_size; i += probe_interval)
25625 rtx_insn *insn
25626 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25628 /* Save the first stack adjustment in RETVAL. */
25629 if (i == 0)
25630 retval = insn;
25633 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25635 else
25637 /* Compute the ending address. */
25638 rtx end_addr
25639 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25640 rtx rs = GEN_INT (-rounded_size);
25641 rtx_insn *insn;
25642 if (add_operand (rs, Pmode))
25643 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25644 else
25646 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25647 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25648 stack_pointer_rtx));
25649 /* Describe the effect of INSN to the CFI engine. */
25650 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25651 gen_rtx_SET (end_addr,
25652 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25653 rs)));
25655 RTX_FRAME_RELATED_P (insn) = 1;
25657 /* Emit the loop. */
25658 if (TARGET_64BIT)
25659 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25660 stack_pointer_rtx, orig_sp,
25661 end_addr));
25662 else
25663 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25664 stack_pointer_rtx, orig_sp,
25665 end_addr));
25666 RTX_FRAME_RELATED_P (retval) = 1;
25667 /* Describe the effect of INSN to the CFI engine. */
25668 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25669 gen_rtx_SET (stack_pointer_rtx, end_addr));
25671 /* Emit a blockage to ensure the allocation/probing insns are
25672 not optimized, combined, removed, etc. Other cases handle this
25673 within their call to rs6000_emit_allocate_stack_1. */
25674 emit_insn (gen_blockage ());
25676 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25679 if (orig_size != rounded_size)
25681 /* Allocate (and implicitly probe) any residual space. */
25682 HOST_WIDE_INT residual = orig_size - rounded_size;
25684 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25686 /* If the residual was the only allocation, then we can return the
25687 allocating insn. */
25688 if (!retval)
25689 retval = insn;
25692 return retval;
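
/* Worked example (illustrative, assuming a 4096-byte probe interval):
   orig_size = 4096  -> the single-probe case, one store-with-update;
   orig_size = 12288 -> three unrolled store-with-update allocations;
   orig_size = 40960 -> more than 8 * 4096, so the probe loop is used;
   orig_size = 5000  -> one 4096-byte probe plus a 904-byte residual
   allocation, which stores the backchain and thus probes as well.  */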
25695 /* Emit the correct code for allocating stack space, as insns.
25696 If COPY_REG, make sure a copy of the old frame is left there.
25697 The generated code may use hard register 0 as a temporary. */
25699 static rtx_insn *
25700 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25702 rtx_insn *insn;
25703 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25704 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25705 rtx todec = gen_int_mode (-size, Pmode);
25707 if (INTVAL (todec) != -size)
25709 warning (0, "stack frame too large");
25710 emit_insn (gen_trap ());
25711 return 0;
25714 if (crtl->limit_stack)
25716 if (REG_P (stack_limit_rtx)
25717 && REGNO (stack_limit_rtx) > 1
25718 && REGNO (stack_limit_rtx) <= 31)
25720 rtx_insn *insn
25721 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25722 gcc_assert (insn);
25723 emit_insn (insn);
25724 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25726 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25727 && TARGET_32BIT
25728 && DEFAULT_ABI == ABI_V4
25729 && !flag_pic)
25731 rtx toload = gen_rtx_CONST (VOIDmode,
25732 gen_rtx_PLUS (Pmode,
25733 stack_limit_rtx,
25734 GEN_INT (size)));
25736 emit_insn (gen_elf_high (tmp_reg, toload));
25737 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25738 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25739 const0_rtx));
25741 else
25742 warning (0, "stack limit expression is not supported");
25745 if (flag_stack_clash_protection)
25747 if (size < get_stack_clash_protection_guard_size ())
25748 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25749 else
25751 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25752 copy_reg);
25754 /* If we asked for a copy with an offset, then we still need to add
25755 in the offset. */
25756 if (copy_reg && copy_off)
25757 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25758 return insn;
25762 if (copy_reg)
25764 if (copy_off != 0)
25765 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25766 else
25767 emit_move_insn (copy_reg, stack_reg);
25770 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25771 it now and set the alias set/attributes. The above gen_*_update
25772 calls will generate a PARALLEL with the MEM set being the first
25773 operation. */
25774 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25775 return insn;
25778 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25780 #if PROBE_INTERVAL > 32768
25781 #error Cannot use indexed addressing mode for stack probing
25782 #endif
25784 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25785 inclusive. These are offsets from the current stack pointer. */
25787 static void
25788 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25790 /* See if we have a constant small number of probes to generate. If so,
25791 that's the easy case. */
25792 if (first + size <= 32768)
25794 HOST_WIDE_INT i;
25796 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25797 it exceeds SIZE. If only one probe is needed, this will not
25798 generate any code. Then probe at FIRST + SIZE. */
25799 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25800 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25801 -(first + i)));
25803 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25804 -(first + size)));
25807 /* Otherwise, do the same as above, but in a loop. Note that we must be
25808 extra careful with variables wrapping around because we might be at
25809 the very top (or the very bottom) of the address space and we have
25810 to be able to handle this case properly; in particular, we use an
25811 equality test for the loop condition. */
25812 else
25814 HOST_WIDE_INT rounded_size;
25815 rtx r12 = gen_rtx_REG (Pmode, 12);
25816 rtx r0 = gen_rtx_REG (Pmode, 0);
25818 /* Sanity check for the addressing mode we're going to use. */
25819 gcc_assert (first <= 32768);
25821 /* Step 1: round SIZE to the previous multiple of the interval. */
25823 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25826 /* Step 2: compute initial and final value of the loop counter. */
25828 /* TEST_ADDR = SP + FIRST. */
25829 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25830 -first)));
25832 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25833 if (rounded_size > 32768)
25835 emit_move_insn (r0, GEN_INT (-rounded_size));
25836 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25838 else
25839 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25840 -rounded_size)));
25843 /* Step 3: the loop
25847 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25848 probe at TEST_ADDR
25850 while (TEST_ADDR != LAST_ADDR)
25852 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25853 until it is equal to ROUNDED_SIZE. */
25855 if (TARGET_64BIT)
25856 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25857 else
25858 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25861 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25862 that SIZE is equal to ROUNDED_SIZE. */
25864 if (size != rounded_size)
25865 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
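
/* Worked example (illustrative, assuming the default 4096-byte
   PROBE_INTERVAL): with FIRST = 16384 and SIZE = 8192 we have
   FIRST + SIZE = 24576 <= 32768, so the unrolled case emits probes at
   sp-20480 and sp-24576 and no loop is needed.  */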
25869 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25870 addresses, not offsets. */
25872 static const char *
25873 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25875 static int labelno = 0;
25876 char loop_lab[32];
25877 rtx xops[2];
25879 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25881 /* Loop. */
25882 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25884 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25885 xops[0] = reg1;
25886 xops[1] = GEN_INT (-PROBE_INTERVAL);
25887 output_asm_insn ("addi %0,%0,%1", xops);
25889 /* Probe at TEST_ADDR. */
25890 xops[1] = gen_rtx_REG (Pmode, 0);
25891 output_asm_insn ("stw %1,0(%0)", xops);
25893 /* Test if TEST_ADDR == LAST_ADDR. */
25894 xops[1] = reg2;
25895 if (TARGET_64BIT)
25896 output_asm_insn ("cmpd 0,%0,%1", xops);
25897 else
25898 output_asm_insn ("cmpw 0,%0,%1", xops);
25900 /* Branch. */
25901 fputs ("\tbne 0,", asm_out_file);
25902 assemble_name_raw (asm_out_file, loop_lab);
25903 fputc ('\n', asm_out_file);
25905 return "";
25908 /* This function is called when rs6000_frame_related is processing
25909 SETs within a PARALLEL, and returns whether the REGNO save ought to
25910 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25911 for out-of-line register save functions, store multiple, and the
25912 Darwin world_save. They may contain registers that don't really
25913 need saving. */
25915 static bool
25916 interesting_frame_related_regno (unsigned int regno)
25918 /* Saves apparently of r0 are actually saving LR. It doesn't make
25919 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25920 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25921 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25922 as frame related. */
25923 if (regno == 0)
25924 return true;
25925 /* If we see CR2 then we are here on a Darwin world save. Saves of
25926 CR2 signify the whole CR is being saved. This is a long-standing
25927 ABI wart fixed by ELFv2. As with r0/lr, there is no need to check
25928 that CR needs to be saved. */
25929 if (regno == CR2_REGNO)
25930 return true;
25931 /* Omit frame info for any user-defined global regs. If frame info
25932 is supplied for them, frame unwinding will restore a user reg.
25933 Also omit frame info for any reg we don't need to save, as that
25934 bloats frame info and can cause problems with shrink wrapping.
25935 Since global regs won't be seen as needing to be saved, both of
25936 these conditions are covered by save_reg_p. */
25937 return save_reg_p (regno);
25940 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25941 addresses, not offsets.
25943 REG2 contains the backchain that must be stored into *sp at each allocation.
25945 This is subtly different than the Ada probing above in that it tries hard
25946 to prevent attacks that jump the stack guard. Thus, it is never allowed
25947 to allocate more than PROBE_INTERVAL bytes of stack space without a
25948 suitable probe. */
25950 static const char *
25951 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25953 static int labelno = 0;
25954 char loop_lab[32];
25955 rtx xops[3];
25957 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25959 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25961 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25963 /* This allocates and probes. */
25964 xops[0] = reg1;
25965 xops[1] = reg2;
25966 xops[2] = GEN_INT (-probe_interval);
25967 if (TARGET_64BIT)
25968 output_asm_insn ("stdu %1,%2(%0)", xops);
25969 else
25970 output_asm_insn ("stwu %1,%2(%0)", xops);
25972 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25973 xops[0] = reg1;
25974 xops[1] = reg3;
25975 if (TARGET_64BIT)
25976 output_asm_insn ("cmpd 0,%0,%1", xops);
25977 else
25978 output_asm_insn ("cmpw 0,%0,%1", xops);
25980 fputs ("\tbne 0,", asm_out_file);
25981 assemble_name_raw (asm_out_file, loop_lab);
25982 fputc ('\n', asm_out_file);
25984 return "";
25987 /* Wrapper around the output_probe_stack_range routines. */
25988 const char *
25989 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25991 if (flag_stack_clash_protection)
25992 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25993 else
25994 return output_probe_stack_range_1 (reg1, reg3);
25997 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25998 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25999 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
26000 deduce these equivalences by itself so it wasn't necessary to hold
26001 its hand so much. Don't be tempted to always supply d2_f_d_e with
26002 the actual cfa register, i.e. r31 when we are using a hard frame
26003 pointer. That fails when saving regs off r1, and sched moves the
26004 r31 setup past the reg saves. */
26006 static rtx_insn *
26007 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
26008 rtx reg2, rtx repl2)
26010 rtx repl;
26012 if (REGNO (reg) == STACK_POINTER_REGNUM)
26014 gcc_checking_assert (val == 0);
26015 repl = NULL_RTX;
26017 else
26018 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
26019 GEN_INT (val));
26021 rtx pat = PATTERN (insn);
26022 if (!repl && !reg2)
26024 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
26025 if (GET_CODE (pat) == PARALLEL)
26026 for (int i = 0; i < XVECLEN (pat, 0); i++)
26027 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26029 rtx set = XVECEXP (pat, 0, i);
26031 if (!REG_P (SET_SRC (set))
26032 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26033 RTX_FRAME_RELATED_P (set) = 1;
26035 RTX_FRAME_RELATED_P (insn) = 1;
26036 return insn;
26039 /* We expect that 'pat' is either a SET or a PARALLEL containing
26040 SETs (and possibly other stuff). In a PARALLEL, all the SETs
26041 are important so they all have to be marked RTX_FRAME_RELATED_P.
26042 Call simplify_replace_rtx on the SETs rather than the whole insn
26043 so as to leave the other stuff alone (for example USE of r12). */
26045 set_used_flags (pat);
26046 if (GET_CODE (pat) == SET)
26048 if (repl)
26049 pat = simplify_replace_rtx (pat, reg, repl);
26050 if (reg2)
26051 pat = simplify_replace_rtx (pat, reg2, repl2);
26053 else if (GET_CODE (pat) == PARALLEL)
26055 pat = shallow_copy_rtx (pat);
26056 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
26058 for (int i = 0; i < XVECLEN (pat, 0); i++)
26059 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26061 rtx set = XVECEXP (pat, 0, i);
26063 if (repl)
26064 set = simplify_replace_rtx (set, reg, repl);
26065 if (reg2)
26066 set = simplify_replace_rtx (set, reg2, repl2);
26067 XVECEXP (pat, 0, i) = set;
26069 if (!REG_P (SET_SRC (set))
26070 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26071 RTX_FRAME_RELATED_P (set) = 1;
26074 else
26075 gcc_unreachable ();
26077 RTX_FRAME_RELATED_P (insn) = 1;
26078 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
26080 return insn;
26083 /* Returns an insn that has a vrsave set operation with the
26084 appropriate CLOBBERs. */
26086 static rtx
26087 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
26089 int nclobs, i;
26090 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
26091 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26093 clobs[0]
26094 = gen_rtx_SET (vrsave,
26095 gen_rtx_UNSPEC_VOLATILE (SImode,
26096 gen_rtvec (2, reg, vrsave),
26097 UNSPECV_SET_VRSAVE));
26099 nclobs = 1;
26101 /* We need to clobber the registers in the mask so the scheduler
26102 does not move sets to VRSAVE before sets of AltiVec registers.
26104 However, if the function receives nonlocal gotos, reload will set
26105 all call saved registers live. We will end up with:
26107 (set (reg 999) (mem))
26108 (parallel [ (set (reg vrsave) (unspec blah))
26109 (clobber (reg 999))])
26111 The clobber will cause the store into reg 999 to be dead, and
26112 flow will attempt to delete an epilogue insn. In this case, we
26113 need an unspec use/set of the register. */
26115 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
26116 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
26118 if (!epiloguep || call_used_regs [i])
26119 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
26120 gen_rtx_REG (V4SImode, i));
26121 else
26123 rtx reg = gen_rtx_REG (V4SImode, i);
26125 clobs[nclobs++]
26126 = gen_rtx_SET (reg,
26127 gen_rtx_UNSPEC (V4SImode,
26128 gen_rtvec (1, reg), 27));
26132 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
26134 for (i = 0; i < nclobs; ++i)
26135 XVECEXP (insn, 0, i) = clobs[i];
26137 return insn;
26140 static rtx
26141 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
26143 rtx addr, mem;
26145 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
26146 mem = gen_frame_mem (GET_MODE (reg), addr);
26147 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
26150 static rtx
26151 gen_frame_load (rtx reg, rtx frame_reg, int offset)
26153 return gen_frame_set (reg, frame_reg, offset, false);
26156 static rtx
26157 gen_frame_store (rtx reg, rtx frame_reg, int offset)
26159 return gen_frame_set (reg, frame_reg, offset, true);
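
/* E.g. gen_frame_store (reg 31, reg 1, 8) produces, schematically,

     (set (mem/c:DI (plus:DI (reg:DI 1) (const_int 8))) (reg:DI 31))

   and gen_frame_load the mirror image with the MEM as the source
   (a sketch of the RTL shape).  */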
26162 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
26163 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
26165 static rtx_insn *
26166 emit_frame_save (rtx frame_reg, machine_mode mode,
26167 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
26169 rtx reg;
26171 /* Some cases that need register indexed addressing. */
26172 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
26173 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
26175 reg = gen_rtx_REG (mode, regno);
26176 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
26177 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
26178 NULL_RTX, NULL_RTX);
26181 /* Emit an offset memory reference suitable for a frame store, while
26182 converting to a valid addressing mode. */
26184 static rtx
26185 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
26187 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
26190 #ifndef TARGET_FIX_AND_CONTINUE
26191 #define TARGET_FIX_AND_CONTINUE 0
26192 #endif
26194 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
26195 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
26196 #define LAST_SAVRES_REGISTER 31
26197 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
26199 enum {
26200 SAVRES_LR = 0x1,
26201 SAVRES_SAVE = 0x2,
26202 SAVRES_REG = 0x0c,
26203 SAVRES_GPR = 0,
26204 SAVRES_FPR = 4,
26205 SAVRES_VR = 8
26208 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
26210 /* Temporary holding space for an out-of-line register save/restore
26211 routine name. */
26212 static char savres_routine_name[30];
26214 /* Return the name for an out-of-line register save/restore routine.
26215 We are saving/restoring GPRs if GPR is true. */
26217 static char *
26218 rs6000_savres_routine_name (int regno, int sel)
26220 const char *prefix = "";
26221 const char *suffix = "";
26223 /* Different targets are supposed to define
26224 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
26225 routine name could be defined with:
26227 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
26229 This is a nice idea in theory, but in practice, things are
26230 complicated in several ways:
26232 - ELF targets have save/restore routines for GPRs.
26234 - PPC64 ELF targets have routines for save/restore of GPRs that
26235 differ in what they do with the link register, so having a set
26236 prefix doesn't work. (We only use one of the save routines at
26237 the moment, though.)
26239 - PPC32 ELF targets have "exit" versions of the restore routines
26240 that restore the link register and can save some extra space.
26241 These require an extra suffix. (There are also "tail" versions
26242 of the restore routines and "GOT" versions of the save routines,
26243 but we don't generate those at present. Same problems apply,
26244 though.)
26246 We deal with all this by synthesizing our own prefix/suffix and
26247 using that for the simple sprintf call shown above. */
26248 if (DEFAULT_ABI == ABI_V4)
26250 if (TARGET_64BIT)
26251 goto aix_names;
26253 if ((sel & SAVRES_REG) == SAVRES_GPR)
26254 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
26255 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26256 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
26257 else if ((sel & SAVRES_REG) == SAVRES_VR)
26258 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26259 else
26260 abort ();
26262 if ((sel & SAVRES_LR))
26263 suffix = "_x";
26265 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26267 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26268 /* No out-of-line save/restore routines for GPRs on AIX. */
26269 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26270 #endif
26272 aix_names:
26273 if ((sel & SAVRES_REG) == SAVRES_GPR)
26274 prefix = ((sel & SAVRES_SAVE)
26275 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26276 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26277 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26279 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26280 if ((sel & SAVRES_LR))
26281 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26282 else
26283 #endif
26285 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26286 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26289 else if ((sel & SAVRES_REG) == SAVRES_VR)
26290 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26291 else
26292 abort ();
26295 if (DEFAULT_ABI == ABI_DARWIN)
26297 /* The Darwin approach is (slightly) different, in order to be
26298 compatible with code generated by the system toolchain. There is a
26299 single symbol for the start of save sequence, and the code here
26300 embeds an offset into that code on the basis of the first register
26301 to be saved. */
26302 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26303 if ((sel & SAVRES_REG) == SAVRES_GPR)
26304 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26305 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26306 (regno - 13) * 4, prefix, regno);
26307 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26308 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26309 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26310 else if ((sel & SAVRES_REG) == SAVRES_VR)
26311 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26312 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26313 else
26314 abort ();
26316 else
26317 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26319 return savres_routine_name;
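
/* Examples of the synthesized names, derived from the rules above
   (first saved register r29, GPRs):

     32-bit SVR4, save with LR:        _savegpr_29_x
     AIX/ELFv2, restore, LR variant:   _restgpr0_29
     Darwin, save:                     *saveGPR+64 ; save r29-r31  */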
26322 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26323 We are saving/restoring GPRs if GPR is true. */
26325 static rtx
26326 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26328 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26329 ? info->first_gp_reg_save
26330 : (sel & SAVRES_REG) == SAVRES_FPR
26331 ? info->first_fp_reg_save - 32
26332 : (sel & SAVRES_REG) == SAVRES_VR
26333 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26334 : -1);
26335 rtx sym;
26336 int select = sel;
26338 /* Don't generate bogus routine names. */
26339 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26340 && regno <= LAST_SAVRES_REGISTER
26341 && select >= 0 && select <= 12);
26343 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26345 if (sym == NULL)
26347 char *name;
26349 name = rs6000_savres_routine_name (regno, sel);
26351 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26352 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26353 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26356 return sym;
26359 /* Emit a sequence of insns, including a stack tie if needed, for
26360 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26361 reset the stack pointer, but move the base of the frame into
26362 reg UPDT_REGNO for use by out-of-line register restore routines. */
26364 static rtx
26365 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26366 unsigned updt_regno)
26368 /* If there is nothing to do, don't do anything. */
26369 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26370 return NULL_RTX;
26372 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26374 /* This blockage is needed so that sched doesn't decide to move
26375 the sp change before the register restores. */
26376 if (DEFAULT_ABI == ABI_V4)
26377 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26378 GEN_INT (frame_off)));
26380 /* If we are restoring registers out-of-line, we will be using the
26381 "exit" variants of the restore routines, which will reset the
26382 stack for us. But we do need to point updt_reg into the
26383 right place for those routines. */
26384 if (frame_off != 0)
26385 return emit_insn (gen_add3_insn (updt_reg_rtx,
26386 frame_reg_rtx, GEN_INT (frame_off)));
26387 else
26388 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26390 return NULL_RTX;
26393 /* Return the register number used as a pointer by out-of-line
26394 save/restore functions. */
26396 static inline unsigned
26397 ptr_regno_for_savres (int sel)
26399 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26400 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26401 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26404 /* Construct a parallel rtx describing the effect of a call to an
26405 out-of-line register save/restore routine, and emit the insn
26406 or jump_insn as appropriate. */
26408 static rtx_insn *
26409 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26410 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26411 machine_mode reg_mode, int sel)
26413 int i;
26414 int offset, start_reg, end_reg, n_regs, use_reg;
26415 int reg_size = GET_MODE_SIZE (reg_mode);
26416 rtx sym;
26417 rtvec p;
26418 rtx par;
26419 rtx_insn *insn;
26421 offset = 0;
26422 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26423 ? info->first_gp_reg_save
26424 : (sel & SAVRES_REG) == SAVRES_FPR
26425 ? info->first_fp_reg_save
26426 : (sel & SAVRES_REG) == SAVRES_VR
26427 ? info->first_altivec_reg_save
26428 : -1);
26429 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26430 ? 32
26431 : (sel & SAVRES_REG) == SAVRES_FPR
26432 ? 64
26433 : (sel & SAVRES_REG) == SAVRES_VR
26434 ? LAST_ALTIVEC_REGNO + 1
26435 : -1);
26436 n_regs = end_reg - start_reg;
26437 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26438 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26439 + n_regs);
26441 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26442 RTVEC_ELT (p, offset++) = ret_rtx;
26444 RTVEC_ELT (p, offset++)
26445 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26447 sym = rs6000_savres_routine_sym (info, sel);
26448 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26450 use_reg = ptr_regno_for_savres (sel);
26451 if ((sel & SAVRES_REG) == SAVRES_VR)
26453 /* Vector regs are saved/restored using [reg+reg] addressing. */
26454 RTVEC_ELT (p, offset++)
26455 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26456 RTVEC_ELT (p, offset++)
26457 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26459 else
26460 RTVEC_ELT (p, offset++)
26461 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26463 for (i = 0; i < end_reg - start_reg; i++)
26464 RTVEC_ELT (p, i + offset)
26465 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26466 frame_reg_rtx, save_area_offset + reg_size * i,
26467 (sel & SAVRES_SAVE) != 0);
26469 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26470 RTVEC_ELT (p, i + offset)
26471 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26473 par = gen_rtx_PARALLEL (VOIDmode, p);
26475 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26477 insn = emit_jump_insn (par);
26478 JUMP_LABEL (insn) = ret_rtx;
26480 else
26481 insn = emit_insn (par);
26482 return insn;
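
/* For a GPR "exit" restore (SAVRES_LR set, SAVRES_SAVE clear) the
   PARALLEL built above looks roughly like

     (parallel [(return)
                (clobber (reg:P LR_REGNO))
                (use (symbol_ref "_restgpr_29_x"))
                (use (reg:P 11))
                (set (reg 29) (mem (plus (reg 11) (const_int OFF))))
                ...])

   and is emitted as a jump_insn with JUMP_LABEL set to ret_rtx (an
   illustrative sketch; the pointer register comes from
   ptr_regno_for_savres).  */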
26485 /* Emit prologue code to store CR fields that need to be saved into REG. This
26486 function should only be called when moving the non-volatile CRs to REG, it
26487 is not a general purpose routine to move the entire set of CRs to REG.
26488 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26489 volatile CRs. */
26491 static void
26492 rs6000_emit_prologue_move_from_cr (rtx reg)
26494 /* Only the ELFv2 ABI allows storing only selected fields. */
26495 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26497 int i, cr_reg[8], count = 0;
26499 /* Collect CR fields that must be saved. */
26500 for (i = 0; i < 8; i++)
26501 if (save_reg_p (CR0_REGNO + i))
26502 cr_reg[count++] = i;
26504 /* If it's just a single one, use mfcrf. */
26505 if (count == 1)
26507 rtvec p = rtvec_alloc (1);
26508 rtvec r = rtvec_alloc (2);
26509 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26510 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26511 RTVEC_ELT (p, 0)
26512 = gen_rtx_SET (reg,
26513 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26515 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26516 return;
26519 /* ??? It might be better to handle count == 2 / 3 cases here
26520 as well, using logical operations to combine the values. */
26523 emit_insn (gen_prologue_movesi_from_cr (reg));
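
/* For instance, if only CR2 needs saving, count == 1 and the FXM mask
   computed above is 1 << (7 - 2) = 0x20, selecting CR field 2, so a
   single mfcrf rather than a full mfcr is emitted (a worked example of
   the mask arithmetic).  */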
26526 /* Return whether the split-stack arg pointer (r12) is used. */
26528 static bool
26529 split_stack_arg_pointer_used_p (void)
26531 /* If the pseudo holding the arg pointer is no longer a pseudo,
26532 then the arg pointer is used. */
26533 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26534 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26535 || (REGNO (cfun->machine->split_stack_arg_pointer)
26536 < FIRST_PSEUDO_REGISTER)))
26537 return true;
26539 /* Unfortunately we also need to do some code scanning, since
26540 r12 may have been substituted for the pseudo. */
26541 rtx_insn *insn;
26542 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26543 FOR_BB_INSNS (bb, insn)
26544 if (NONDEBUG_INSN_P (insn))
26546 /* A call destroys r12. */
26547 if (CALL_P (insn))
26548 return false;
26550 df_ref use;
26551 FOR_EACH_INSN_USE (use, insn)
26553 rtx x = DF_REF_REG (use);
26554 if (REG_P (x) && REGNO (x) == 12)
26555 return true;
26557 df_ref def;
26558 FOR_EACH_INSN_DEF (def, insn)
26560 rtx x = DF_REF_REG (def);
26561 if (REG_P (x) && REGNO (x) == 12)
26562 return false;
26565 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26568 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26570 static bool
26571 rs6000_global_entry_point_needed_p (void)
26573 /* Only needed for the ELFv2 ABI. */
26574 if (DEFAULT_ABI != ABI_ELFv2)
26575 return false;
26577 /* With -msingle-pic-base, we assume the whole program shares the same
26578 TOC, so no global entry point prologues are needed anywhere. */
26579 if (TARGET_SINGLE_PIC_BASE)
26580 return false;
26582 /* Ensure we have a global entry point for thunks. ??? We could
26583 avoid that if the target routine doesn't need a global entry point,
26584 but we do not know whether this is the case at this point. */
26585 if (cfun->is_thunk)
26586 return true;
26588 /* For regular functions, rs6000_emit_prologue sets this flag if the
26589 routine ever uses the TOC pointer. */
26590 return cfun->machine->r2_setup_needed;
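
/* The guarded ELFv2 global entry prologue looks roughly like this
   (a sketch; "foo" is a placeholder):

   foo:
     addis 2,12,.TOC.-foo@ha
     addi 2,2,.TOC.-foo@l
     .localentry foo,.-foo

   deriving r2 from the function address that the caller passes in
   r12.  */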
26593 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26594 static sbitmap
26595 rs6000_get_separate_components (void)
26597 rs6000_stack_t *info = rs6000_stack_info ();
26599 if (WORLD_SAVE_P (info))
26600 return NULL;
26602 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26603 && !(info->savres_strategy & REST_MULTIPLE));
26605 /* Component 0 is the save/restore of LR (done via GPR0).
26606 Component 2 is the save of the TOC (GPR2).
26607 Components 13..31 are the save/restore of GPR13..GPR31.
26608 Components 46..63 are the save/restore of FPR14..FPR31. */
26610 cfun->machine->n_components = 64;
26612 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26613 bitmap_clear (components);
26615 int reg_size = TARGET_32BIT ? 4 : 8;
26616 int fp_reg_size = 8;
26618 /* The GPRs we need saved to the frame. */
26619 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26620 && (info->savres_strategy & REST_INLINE_GPRS))
26622 int offset = info->gp_save_offset;
26623 if (info->push_p)
26624 offset += info->total_size;
26626 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26628 if (IN_RANGE (offset, -0x8000, 0x7fff)
26629 && save_reg_p (regno))
26630 bitmap_set_bit (components, regno);
26632 offset += reg_size;
26636 /* Don't mess with the hard frame pointer. */
26637 if (frame_pointer_needed)
26638 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26640 /* Don't mess with the fixed TOC register. */
26641 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26642 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26643 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26644 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26646 /* The FPRs we need saved to the frame. */
26647 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26648 && (info->savres_strategy & REST_INLINE_FPRS))
26650 int offset = info->fp_save_offset;
26651 if (info->push_p)
26652 offset += info->total_size;
26654 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26656 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26657 bitmap_set_bit (components, regno);
26659 offset += fp_reg_size;
26663 /* Optimize LR save and restore if we can. This is component 0. Any
26664 out-of-line register save/restore routines need LR. */
26665 if (info->lr_save_p
26666 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26667 && (info->savres_strategy & SAVE_INLINE_GPRS)
26668 && (info->savres_strategy & REST_INLINE_GPRS)
26669 && (info->savres_strategy & SAVE_INLINE_FPRS)
26670 && (info->savres_strategy & REST_INLINE_FPRS)
26671 && (info->savres_strategy & SAVE_INLINE_VRS)
26672 && (info->savres_strategy & REST_INLINE_VRS))
26674 int offset = info->lr_save_offset;
26675 if (info->push_p)
26676 offset += info->total_size;
26677 if (IN_RANGE (offset, -0x8000, 0x7fff))
26678 bitmap_set_bit (components, 0);
26681 /* Optimize saving the TOC. This is component 2. */
26682 if (cfun->machine->save_toc_in_prologue)
26683 bitmap_set_bit (components, 2);
26685 return components;
26688 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26689 static sbitmap
26690 rs6000_components_for_bb (basic_block bb)
26692 rs6000_stack_t *info = rs6000_stack_info ();
26694 bitmap in = DF_LIVE_IN (bb);
26695 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26696 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26698 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26699 bitmap_clear (components);
26701 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26703 /* GPRs. */
26704 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26705 if (bitmap_bit_p (in, regno)
26706 || bitmap_bit_p (gen, regno)
26707 || bitmap_bit_p (kill, regno))
26708 bitmap_set_bit (components, regno);
26710 /* FPRs. */
26711 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26712 if (bitmap_bit_p (in, regno)
26713 || bitmap_bit_p (gen, regno)
26714 || bitmap_bit_p (kill, regno))
26715 bitmap_set_bit (components, regno);
26717 /* The link register. */
26718 if (bitmap_bit_p (in, LR_REGNO)
26719 || bitmap_bit_p (gen, LR_REGNO)
26720 || bitmap_bit_p (kill, LR_REGNO))
26721 bitmap_set_bit (components, 0);
26723 /* The TOC save. */
26724 if (bitmap_bit_p (in, TOC_REGNUM)
26725 || bitmap_bit_p (gen, TOC_REGNUM)
26726 || bitmap_bit_p (kill, TOC_REGNUM))
26727 bitmap_set_bit (components, 2);
26729 return components;
26732 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26733 static void
26734 rs6000_disqualify_components (sbitmap components, edge e,
26735 sbitmap edge_components, bool /*is_prologue*/)
26737 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26738 live where we want to place that code. */
26739 if (bitmap_bit_p (edge_components, 0)
26740 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26742 if (dump_file)
26743 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26744 "on entry to bb %d\n", e->dest->index);
26745 bitmap_clear_bit (components, 0);
26749 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26750 static void
26751 rs6000_emit_prologue_components (sbitmap components)
26753 rs6000_stack_t *info = rs6000_stack_info ();
26754 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26755 ? HARD_FRAME_POINTER_REGNUM
26756 : STACK_POINTER_REGNUM);
26758 machine_mode reg_mode = Pmode;
26759 int reg_size = TARGET_32BIT ? 4 : 8;
26760 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26761 ? DFmode : SFmode;
26762 int fp_reg_size = 8;
26764 /* Prologue for LR. */
26765 if (bitmap_bit_p (components, 0))
26767 rtx reg = gen_rtx_REG (reg_mode, 0);
26768 rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
26769 RTX_FRAME_RELATED_P (insn) = 1;
26770 add_reg_note (insn, REG_CFA_REGISTER, NULL);
26772 int offset = info->lr_save_offset;
26773 if (info->push_p)
26774 offset += info->total_size;
26776 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26777 RTX_FRAME_RELATED_P (insn) = 1;
26778 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26779 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26780 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26783 /* Prologue for TOC. */
26784 if (bitmap_bit_p (components, 2))
26786 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26787 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26788 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26791 /* Prologue for the GPRs. */
26792 int offset = info->gp_save_offset;
26793 if (info->push_p)
26794 offset += info->total_size;
26796 for (int i = info->first_gp_reg_save; i < 32; i++)
26798 if (bitmap_bit_p (components, i))
26800 rtx reg = gen_rtx_REG (reg_mode, i);
26801 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26802 RTX_FRAME_RELATED_P (insn) = 1;
26803 rtx set = copy_rtx (single_set (insn));
26804 add_reg_note (insn, REG_CFA_OFFSET, set);
26807 offset += reg_size;
26810 /* Prologue for the FPRs. */
26811 offset = info->fp_save_offset;
26812 if (info->push_p)
26813 offset += info->total_size;
26815 for (int i = info->first_fp_reg_save; i < 64; i++)
26817 if (bitmap_bit_p (components, i))
26819 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26820 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26821 RTX_FRAME_RELATED_P (insn) = 1;
26822 rtx set = copy_rtx (single_set (insn));
26823 add_reg_note (insn, REG_CFA_OFFSET, set);
26826 offset += fp_reg_size;
26830 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26831 static void
26832 rs6000_emit_epilogue_components (sbitmap components)
26834 rs6000_stack_t *info = rs6000_stack_info ();
26835 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26836 ? HARD_FRAME_POINTER_REGNUM
26837 : STACK_POINTER_REGNUM);
26839 machine_mode reg_mode = Pmode;
26840 int reg_size = TARGET_32BIT ? 4 : 8;
26842 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26843 ? DFmode : SFmode;
26844 int fp_reg_size = 8;
26846 /* Epilogue for the FPRs. */
26847 int offset = info->fp_save_offset;
26848 if (info->push_p)
26849 offset += info->total_size;
26851 for (int i = info->first_fp_reg_save; i < 64; i++)
26853 if (bitmap_bit_p (components, i))
26855 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26856 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26857 RTX_FRAME_RELATED_P (insn) = 1;
26858 add_reg_note (insn, REG_CFA_RESTORE, reg);
26861 offset += fp_reg_size;
26864 /* Epilogue for the GPRs. */
26865 offset = info->gp_save_offset;
26866 if (info->push_p)
26867 offset += info->total_size;
26869 for (int i = info->first_gp_reg_save; i < 32; i++)
26871 if (bitmap_bit_p (components, i))
26873 rtx reg = gen_rtx_REG (reg_mode, i);
26874 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26875 RTX_FRAME_RELATED_P (insn) = 1;
26876 add_reg_note (insn, REG_CFA_RESTORE, reg);
26879 offset += reg_size;
26882 /* Epilogue for LR. */
26883 if (bitmap_bit_p (components, 0))
26885 int offset = info->lr_save_offset;
26886 if (info->push_p)
26887 offset += info->total_size;
26889 rtx reg = gen_rtx_REG (reg_mode, 0);
26890 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26892 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26893 insn = emit_move_insn (lr, reg);
26894 RTX_FRAME_RELATED_P (insn) = 1;
26895 add_reg_note (insn, REG_CFA_RESTORE, lr);
26899 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26900 static void
26901 rs6000_set_handled_components (sbitmap components)
26903 rs6000_stack_t *info = rs6000_stack_info ();
26905 for (int i = info->first_gp_reg_save; i < 32; i++)
26906 if (bitmap_bit_p (components, i))
26907 cfun->machine->gpr_is_wrapped_separately[i] = true;
26909 for (int i = info->first_fp_reg_save; i < 64; i++)
26910 if (bitmap_bit_p (components, i))
26911 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26913 if (bitmap_bit_p (components, 0))
26914 cfun->machine->lr_is_wrapped_separately = true;
26916 if (bitmap_bit_p (components, 2))
26917 cfun->machine->toc_is_wrapped_separately = true;
26920 /* VRSAVE is a bit vector representing which AltiVec registers
26921 are used. The OS uses this to determine which vector
26922 registers to save on a context switch. We need to save
26923 VRSAVE on the stack frame, add whatever AltiVec registers we
26924 used in this function, and do the corresponding magic in the
26925 epilogue. */
26926 static void
26927 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26928 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26930 /* Get VRSAVE into a GPR. */
26931 rtx reg = gen_rtx_REG (SImode, save_regno);
26932 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26933 if (TARGET_MACHO)
26934 emit_insn (gen_get_vrsave_internal (reg));
26935 else
26936 emit_insn (gen_rtx_SET (reg, vrsave));
26938 /* Save VRSAVE. */
26939 int offset = info->vrsave_save_offset + frame_off;
26940 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26942 /* Include the registers in the mask. */
26943 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26945 emit_insn (generate_set_vrsave (reg, info, 0));
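
/* Schematically (an illustrative sketch, assuming SAVE_REGNO is r11
   and a vrsave_mask of 0xc0000000, i.e. v0/v1 used):

     mfvrsave 11              # i.e. mfspr 11,256
     stw 11,OFFSET(1)         # save old VRSAVE in the frame
     oris 11,11,0xc000        # or in the mask of registers we use
     mtvrsave 11  */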
26948 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26949 called, it left the arg pointer to the old stack in r29. Otherwise, the
26950 arg pointer is the top of the current frame. */
26951 static void
26952 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26953 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26955 cfun->machine->split_stack_argp_used = true;
26957 if (sp_adjust)
26959 rtx r12 = gen_rtx_REG (Pmode, 12);
26960 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26961 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26962 emit_insn_before (set_r12, sp_adjust);
26964 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26966 rtx r12 = gen_rtx_REG (Pmode, 12);
26967 if (frame_off == 0)
26968 emit_move_insn (r12, frame_reg_rtx);
26969 else
26970 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26973 if (info->push_p)
26975 rtx r12 = gen_rtx_REG (Pmode, 12);
26976 rtx r29 = gen_rtx_REG (Pmode, 29);
26977 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26978 rtx not_more = gen_label_rtx ();
26979 rtx jump;
26981 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26982 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26983 gen_rtx_LABEL_REF (VOIDmode, not_more),
26984 pc_rtx);
26985 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26986 JUMP_LABEL (jump) = not_more;
26987 LABEL_NUSES (not_more) += 1;
26988 emit_move_insn (r12, r29);
26989 emit_label (not_more);
26993 /* Emit function prologue as insns. */
26995 void
26996 rs6000_emit_prologue (void)
26998 rs6000_stack_t *info = rs6000_stack_info ();
26999 machine_mode reg_mode = Pmode;
27000 int reg_size = TARGET_32BIT ? 4 : 8;
27001 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
27002 ? DFmode : SFmode;
27003 int fp_reg_size = 8;
27004 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
27005 rtx frame_reg_rtx = sp_reg_rtx;
27006 unsigned int cr_save_regno;
27007 rtx cr_save_rtx = NULL_RTX;
27008 rtx_insn *insn;
27009 int strategy;
27010 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
27011 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
27012 && call_used_regs[STATIC_CHAIN_REGNUM]);
27013 int using_split_stack = (flag_split_stack
27014 && (lookup_attribute ("no_split_stack",
27015 DECL_ATTRIBUTES (cfun->decl))
27016 == NULL));
27018 /* Offset to top of frame for frame_reg and sp respectively. */
27019 HOST_WIDE_INT frame_off = 0;
27020 HOST_WIDE_INT sp_off = 0;
27021 /* sp_adjust is the stack adjusting instruction, tracked so that the
27022 insn setting up the split-stack arg pointer can be emitted just
27023 prior to it, when r12 is not used here for other purposes. */
27024 rtx_insn *sp_adjust = 0;
27026 #if CHECKING_P
27027 /* Track and check usage of r0, r11, r12. */
27028 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
27029 #define START_USE(R) do \
27031 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27032 reg_inuse |= 1 << (R); \
27033 } while (0)
27034 #define END_USE(R) do \
27036 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
27037 reg_inuse &= ~(1 << (R)); \
27038 } while (0)
27039 #define NOT_INUSE(R) do \
27041 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27042 } while (0)
27043 #else
27044 #define START_USE(R) do {} while (0)
27045 #define END_USE(R) do {} while (0)
27046 #define NOT_INUSE(R) do {} while (0)
27047 #endif
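/* For example, the LR save code below brackets its use of r0 with
   START_USE (0) ... END_USE (0), so a checking build asserts if any
   overlapping code claims r0 at the same time.  */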
27049 if (DEFAULT_ABI == ABI_ELFv2
27050 && !TARGET_SINGLE_PIC_BASE)
27052 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
27054 /* With -mminimal-toc we may generate an extra use of r2 below. */
27055 if (TARGET_TOC && TARGET_MINIMAL_TOC
27056 && !constant_pool_empty_p ())
27057 cfun->machine->r2_setup_needed = true;
27061 if (flag_stack_usage_info)
27062 current_function_static_stack_size = info->total_size;
27064 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
27066 HOST_WIDE_INT size = info->total_size;
27068 if (crtl->is_leaf && !cfun->calls_alloca)
27070 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
27071 rs6000_emit_probe_stack_range (get_stack_check_protect (),
27072 size - get_stack_check_protect ());
27074 else if (size > 0)
27075 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
27078 if (TARGET_FIX_AND_CONTINUE)
27080 /* gdb on darwin arranges to forward a function from the old
27081 address by modifying the first 5 instructions of the function
27082 to branch to the overriding function. This is necessary to
27083 permit function pointers that point to the old function to
27084 actually forward to the new function. */
27085 emit_insn (gen_nop ());
27086 emit_insn (gen_nop ());
27087 emit_insn (gen_nop ());
27088 emit_insn (gen_nop ());
27089 emit_insn (gen_nop ());
27092 /* Handle world saves specially here. */
27093 if (WORLD_SAVE_P (info))
27095 int i, j, sz;
27096 rtx treg;
27097 rtvec p;
27098 rtx reg0;
27100 /* save_world expects lr in r0. */
27101 reg0 = gen_rtx_REG (Pmode, 0);
27102 if (info->lr_save_p)
27104 insn = emit_move_insn (reg0,
27105 gen_rtx_REG (Pmode, LR_REGNO));
27106 RTX_FRAME_RELATED_P (insn) = 1;
27109 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
27110 assumptions about the offsets of various bits of the stack
27111 frame. */
27112 gcc_assert (info->gp_save_offset == -220
27113 && info->fp_save_offset == -144
27114 && info->lr_save_offset == 8
27115 && info->cr_save_offset == 4
27116 && info->push_p
27117 && info->lr_save_p
27118 && (!crtl->calls_eh_return
27119 || info->ehrd_offset == -432)
27120 && info->vrsave_save_offset == -224
27121 && info->altivec_save_offset == -416);
27123 treg = gen_rtx_REG (SImode, 11);
27124 emit_move_insn (treg, GEN_INT (-info->total_size));
27126 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
27127 in R11. It also clobbers R12, so beware! */
27129 /* Preserve CR2 for save_world prologues */
27130 sz = 5;
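/* The five fixed elements are the LR clobber, the use of the
   save_world symbol, the CR2 store, the LR store (lr_save_p is
   asserted above), and the stack pointer update.  */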
27131 sz += 32 - info->first_gp_reg_save;
27132 sz += 64 - info->first_fp_reg_save;
27133 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
27134 p = rtvec_alloc (sz);
27135 j = 0;
27136 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
27137 gen_rtx_REG (SImode,
27138 LR_REGNO));
27139 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
27140 gen_rtx_SYMBOL_REF (Pmode,
27141 "*save_world"));
27142 /* We do floats first so that the instruction pattern matches
27143 properly. */
27144 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
27145 RTVEC_ELT (p, j++)
27146 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
27147 ? DFmode : SFmode,
27148 info->first_fp_reg_save + i),
27149 frame_reg_rtx,
27150 info->fp_save_offset + frame_off + 8 * i);
27151 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27152 RTVEC_ELT (p, j++)
27153 = gen_frame_store (gen_rtx_REG (V4SImode,
27154 info->first_altivec_reg_save + i),
27155 frame_reg_rtx,
27156 info->altivec_save_offset + frame_off + 16 * i);
27157 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27158 RTVEC_ELT (p, j++)
27159 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27160 frame_reg_rtx,
27161 info->gp_save_offset + frame_off + reg_size * i);
27163 /* CR register traditionally saved as CR2. */
27164 RTVEC_ELT (p, j++)
27165 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
27166 frame_reg_rtx, info->cr_save_offset + frame_off);
27167 /* Explain about use of R0. */
27168 if (info->lr_save_p)
27169 RTVEC_ELT (p, j++)
27170 = gen_frame_store (reg0,
27171 frame_reg_rtx, info->lr_save_offset + frame_off);
27172 /* Explain what happens to the stack pointer. */
27174 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
27175 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
27178 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27179 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27180 treg, GEN_INT (-info->total_size));
27181 sp_off = frame_off = info->total_size;
27184 strategy = info->savres_strategy;
27186 /* For V.4, update stack before we do any saving and set back pointer. */
27187 if (! WORLD_SAVE_P (info)
27188 && info->push_p
27189 && (DEFAULT_ABI == ABI_V4
27190 || crtl->calls_eh_return))
27192 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
27193 || !(strategy & SAVE_INLINE_GPRS)
27194 || !(strategy & SAVE_INLINE_VRS));
27195 int ptr_regno = -1;
27196 rtx ptr_reg = NULL_RTX;
27197 int ptr_off = 0;
27199 if (info->total_size < 32767)
27200 frame_off = info->total_size;
27201 else if (need_r11)
27202 ptr_regno = 11;
27203 else if (info->cr_save_p
27204 || info->lr_save_p
27205 || info->first_fp_reg_save < 64
27206 || info->first_gp_reg_save < 32
27207 || info->altivec_size != 0
27208 || info->vrsave_size != 0
27209 || crtl->calls_eh_return)
27210 ptr_regno = 12;
27211 else
27213 /* The prologue won't be saving any regs so there is no need
27214 to set up a frame register to access any frame save area.
27215 We also won't be using frame_off anywhere below, but set
27216 the correct value anyway to protect against future
27217 changes to this function. */
27218 frame_off = info->total_size;
27220 if (ptr_regno != -1)
27222 /* Set up the frame offset to that needed by the first
27223 out-of-line save function. */
27224 START_USE (ptr_regno);
27225 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27226 frame_reg_rtx = ptr_reg;
27227 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
27228 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
27229 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
27230 ptr_off = info->gp_save_offset + info->gp_size;
27231 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
27232 ptr_off = info->altivec_save_offset + info->altivec_size;
27233 frame_off = -ptr_off;
27235 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27236 ptr_reg, ptr_off);
27237 if (REGNO (frame_reg_rtx) == 12)
27238 sp_adjust = 0;
27239 sp_off = info->total_size;
27240 if (frame_reg_rtx != sp_reg_rtx)
27241 rs6000_emit_stack_tie (frame_reg_rtx, false);
27244 /* If we use the link register, get it into r0. */
27245 if (!WORLD_SAVE_P (info) && info->lr_save_p
27246 && !cfun->machine->lr_is_wrapped_separately)
27248 rtx addr, reg, mem;
27250 reg = gen_rtx_REG (Pmode, 0);
27251 START_USE (0);
27252 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
27253 RTX_FRAME_RELATED_P (insn) = 1;
27255 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
27256 | SAVE_NOINLINE_FPRS_SAVES_LR)))
27258 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27259 GEN_INT (info->lr_save_offset + frame_off));
27260 mem = gen_rtx_MEM (Pmode, addr);
27261 /* This store must not use rs6000_sr_alias_set, because
27262 __builtin_return_address can read the slot. */
27264 insn = emit_move_insn (mem, reg);
27265 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27266 NULL_RTX, NULL_RTX);
27267 END_USE (0);
27271 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27272 r12 will be needed by out-of-line gpr restore. */
27273 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27274 && !(strategy & (SAVE_INLINE_GPRS
27275 | SAVE_NOINLINE_GPRS_SAVES_LR))
27276 ? 11 : 12);
27277 if (!WORLD_SAVE_P (info)
27278 && info->cr_save_p
27279 && REGNO (frame_reg_rtx) != cr_save_regno
27280 && !(using_static_chain_p && cr_save_regno == 11)
27281 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
27283 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
27284 START_USE (cr_save_regno);
27285 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27288 /* Do any required saving of fpr's. If only one or two to save, do
27289 it ourselves. Otherwise, call function. */
27290 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
27292 int offset = info->fp_save_offset + frame_off;
27293 for (int i = info->first_fp_reg_save; i < 64; i++)
27295 if (save_reg_p (i)
27296 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
27297 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
27298 sp_off - frame_off);
27300 offset += fp_reg_size;
27303 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
27305 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27306 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27307 unsigned ptr_regno = ptr_regno_for_savres (sel);
27308 rtx ptr_reg = frame_reg_rtx;
27310 if (REGNO (frame_reg_rtx) == ptr_regno)
27311 gcc_checking_assert (frame_off == 0);
27312 else
27314 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27315 NOT_INUSE (ptr_regno);
27316 emit_insn (gen_add3_insn (ptr_reg,
27317 frame_reg_rtx, GEN_INT (frame_off)));
27319 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27320 info->fp_save_offset,
27321 info->lr_save_offset,
27322 DFmode, sel);
27323 rs6000_frame_related (insn, ptr_reg, sp_off,
27324 NULL_RTX, NULL_RTX);
27325 if (lr)
27326 END_USE (0);
27329 /* Save GPRs. This is done as a PARALLEL if we are using
27330 the store-multiple instructions. */
27331 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27333 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27334 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27335 unsigned ptr_regno = ptr_regno_for_savres (sel);
27336 rtx ptr_reg = frame_reg_rtx;
27337 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27338 int end_save = info->gp_save_offset + info->gp_size;
27339 int ptr_off;
27341 if (ptr_regno == 12)
27342 sp_adjust = 0;
27343 if (!ptr_set_up)
27344 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27346 /* Need to adjust r11 (r12) if we saved any FPRs. */
27347 if (end_save + frame_off != 0)
27349 rtx offset = GEN_INT (end_save + frame_off);
27351 if (ptr_set_up)
27352 frame_off = -end_save;
27353 else
27354 NOT_INUSE (ptr_regno);
27355 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27357 else if (!ptr_set_up)
27359 NOT_INUSE (ptr_regno);
27360 emit_move_insn (ptr_reg, frame_reg_rtx);
27362 ptr_off = -end_save;
27363 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27364 info->gp_save_offset + ptr_off,
27365 info->lr_save_offset + ptr_off,
27366 reg_mode, sel);
27367 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27368 NULL_RTX, NULL_RTX);
27369 if (lr)
27370 END_USE (0);
27372 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27374 rtvec p;
27375 int i;
27376 p = rtvec_alloc (32 - info->first_gp_reg_save);
27377 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27378 RTVEC_ELT (p, i)
27379 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27380 frame_reg_rtx,
27381 info->gp_save_offset + frame_off + reg_size * i);
27382 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27383 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27384 NULL_RTX, NULL_RTX);
27386 else if (!WORLD_SAVE_P (info))
27388 int offset = info->gp_save_offset + frame_off;
27389 for (int i = info->first_gp_reg_save; i < 32; i++)
27391 if (save_reg_p (i)
27392 && !cfun->machine->gpr_is_wrapped_separately[i])
27393 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27394 sp_off - frame_off);
27396 offset += reg_size;
27400 if (crtl->calls_eh_return)
27402 unsigned int i;
27403 rtvec p;
27405 for (i = 0; ; ++i)
27407 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27408 if (regno == INVALID_REGNUM)
27409 break;
27412 p = rtvec_alloc (i);
27414 for (i = 0; ; ++i)
27416 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27417 if (regno == INVALID_REGNUM)
27418 break;
27420 rtx set
27421 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27422 sp_reg_rtx,
27423 info->ehrd_offset + sp_off + reg_size * (int) i);
27424 RTVEC_ELT (p, i) = set;
27425 RTX_FRAME_RELATED_P (set) = 1;
27428 insn = emit_insn (gen_blockage ());
27429 RTX_FRAME_RELATED_P (insn) = 1;
27430 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27433 /* In AIX ABI we need to make sure r2 is really saved. */
27434 if (TARGET_AIX && crtl->calls_eh_return)
27436 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27437 rtx join_insn, note;
27438 rtx_insn *save_insn;
27439 long toc_restore_insn;
27441 tmp_reg = gen_rtx_REG (Pmode, 11);
27442 tmp_reg_si = gen_rtx_REG (SImode, 11);
27443 if (using_static_chain_p)
27445 START_USE (0);
27446 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27448 else
27449 START_USE (11);
27450 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27451 /* Peek at instruction to which this function returns. If it's
27452 restoring r2, then we know we've already saved r2. We can't
27453 unconditionally save r2 because the value we have will already
27454 be updated if we arrived at this function via a plt call or
27455 toc adjusting stub. */
27456 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27457 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27458 + RS6000_TOC_SAVE_SLOT);
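/* 0x80410000 is "lwz r2,0(r1)" and 0xE8410000 is "ld r2,0(r1)", so
   adding RS6000_TOC_SAVE_SLOT yields the exact image of a return
   site's TOC restore.  Matching a 32-bit word needs two 16-bit
   immediates: the xor below clears the high half iff it matches,
   and the compare then checks the remaining low half.  */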
27459 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27460 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27461 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27462 validate_condition_mode (EQ, CCUNSmode);
27463 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27464 emit_insn (gen_rtx_SET (compare_result,
27465 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27466 toc_save_done = gen_label_rtx ();
27467 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27468 gen_rtx_EQ (VOIDmode, compare_result,
27469 const0_rtx),
27470 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27471 pc_rtx);
27472 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27473 JUMP_LABEL (jump) = toc_save_done;
27474 LABEL_NUSES (toc_save_done) += 1;
27476 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27477 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27478 sp_off - frame_off);
27480 emit_label (toc_save_done);
27482 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
27483 have a CFG that has different saves along different paths.
27484 Move the note to a dummy blockage insn, which describes that
27485 R2 is unconditionally saved after the label. */
27486 /* ??? An alternate representation might be a special insn pattern
27487 containing both the branch and the store. That might give the
27488 code that minimizes the number of DW_CFA_advance opcodes more
27489 freedom in placing the annotations. */
27490 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27491 if (note)
27492 remove_note (save_insn, note);
27493 else
27494 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27495 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27496 RTX_FRAME_RELATED_P (save_insn) = 0;
27498 join_insn = emit_insn (gen_blockage ());
27499 REG_NOTES (join_insn) = note;
27500 RTX_FRAME_RELATED_P (join_insn) = 1;
27502 if (using_static_chain_p)
27504 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27505 END_USE (0);
27507 else
27508 END_USE (11);
27511 /* Save CR if we use any that must be preserved. */
27512 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27514 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27515 GEN_INT (info->cr_save_offset + frame_off));
27516 rtx mem = gen_frame_mem (SImode, addr);
27518 /* If we didn't copy cr before, do so now using r0. */
27519 if (cr_save_rtx == NULL_RTX)
27521 START_USE (0);
27522 cr_save_rtx = gen_rtx_REG (SImode, 0);
27523 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27526 /* Saving CR requires a two-instruction sequence: one instruction
27527 to move the CR to a general-purpose register, and a second
27528 instruction that stores the GPR to memory.
27530 We do not emit any DWARF CFI records for the first of these,
27531 because we cannot properly represent the fact that CR is saved in
27532 a register. One reason is that we cannot express that multiple
27533 CR fields are saved; another reason is that on 64-bit, the size
27534 of the CR register in DWARF (4 bytes) differs from the size of
27535 a general-purpose register.
27537 This means if any intervening instruction were to clobber one of
27538 the call-saved CR fields, we'd have incorrect CFI. To prevent
27539 this from happening, we mark the store to memory as a use of
27540 those CR fields, which prevents any such instruction from being
27541 scheduled in between the two instructions. */
27542 rtx crsave_v[9];
27543 int n_crsave = 0;
27544 int i;
27546 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27547 for (i = 0; i < 8; i++)
27548 if (save_reg_p (CR0_REGNO + i))
27549 crsave_v[n_crsave++]
27550 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27552 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27553 gen_rtvec_v (n_crsave, crsave_v)));
27554 END_USE (REGNO (cr_save_rtx));
27556 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27557 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27558 so we need to construct a frame expression manually. */
27559 RTX_FRAME_RELATED_P (insn) = 1;
27561 /* Update address to be stack-pointer relative, like
27562 rs6000_frame_related would do. */
27563 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27564 GEN_INT (info->cr_save_offset + sp_off));
27565 mem = gen_frame_mem (SImode, addr);
27567 if (DEFAULT_ABI == ABI_ELFv2)
27569 /* In the ELFv2 ABI we generate separate CFI records for each
27570 CR field that was actually saved. They all point to the
27571 same 32-bit stack slot. */
27572 rtx crframe[8];
27573 int n_crframe = 0;
27575 for (i = 0; i < 8; i++)
27576 if (save_reg_p (CR0_REGNO + i))
27578 crframe[n_crframe]
27579 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27581 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27582 n_crframe++;
27585 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27586 gen_rtx_PARALLEL (VOIDmode,
27587 gen_rtvec_v (n_crframe, crframe)));
27589 else
27591 /* In other ABIs, by convention, we use a single CR regnum to
27592 represent the fact that all call-saved CR fields are saved.
27593 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27594 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27595 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27599 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27600 *separate* slots if the routine calls __builtin_eh_return, so
27601 that they can be independently restored by the unwinder. */
27602 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27604 int i, cr_off = info->ehcr_offset;
27605 rtx crsave;
27607 /* ??? We might get better performance by using multiple mfocrf
27608 instructions. */
27609 crsave = gen_rtx_REG (SImode, 0);
27610 emit_insn (gen_prologue_movesi_from_cr (crsave));
27612 for (i = 0; i < 8; i++)
27613 if (!call_used_regs[CR0_REGNO + i])
27615 rtvec p = rtvec_alloc (2);
27616 RTVEC_ELT (p, 0)
27617 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27618 RTVEC_ELT (p, 1)
27619 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27621 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27623 RTX_FRAME_RELATED_P (insn) = 1;
27624 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27625 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27626 sp_reg_rtx, cr_off + sp_off));
27628 cr_off += reg_size;
27632 /* If we are emitting stack probes, but allocate no stack, then
27633 just note that in the dump file. */
27634 if (flag_stack_clash_protection
27635 && dump_file
27636 && !info->push_p)
27637 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27639 /* Update stack and set back pointer unless this is V.4,
27640 for which it was done previously. */
27641 if (!WORLD_SAVE_P (info) && info->push_p
27642 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27644 rtx ptr_reg = NULL;
27645 int ptr_off = 0;
27647 /* If saving altivec regs we need to be able to address all save
27648 locations using a 16-bit offset. */
27649 if ((strategy & SAVE_INLINE_VRS) == 0
27650 || (info->altivec_size != 0
27651 && (info->altivec_save_offset + info->altivec_size - 16
27652 + info->total_size - frame_off) > 32767)
27653 || (info->vrsave_size != 0
27654 && (info->vrsave_save_offset
27655 + info->total_size - frame_off) > 32767))
27657 int sel = SAVRES_SAVE | SAVRES_VR;
27658 unsigned ptr_regno = ptr_regno_for_savres (sel);
27660 if (using_static_chain_p
27661 && ptr_regno == STATIC_CHAIN_REGNUM)
27662 ptr_regno = 12;
27663 if (REGNO (frame_reg_rtx) != ptr_regno)
27664 START_USE (ptr_regno);
27665 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27666 frame_reg_rtx = ptr_reg;
27667 ptr_off = info->altivec_save_offset + info->altivec_size;
27668 frame_off = -ptr_off;
27670 else if (REGNO (frame_reg_rtx) == 1)
27671 frame_off = info->total_size;
27672 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27673 ptr_reg, ptr_off);
27674 if (REGNO (frame_reg_rtx) == 12)
27675 sp_adjust = 0;
27676 sp_off = info->total_size;
27677 if (frame_reg_rtx != sp_reg_rtx)
27678 rs6000_emit_stack_tie (frame_reg_rtx, false);
27681 /* Set frame pointer, if needed. */
27682 if (frame_pointer_needed)
27684 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27685 sp_reg_rtx);
27686 RTX_FRAME_RELATED_P (insn) = 1;
27689 /* Save AltiVec registers if needed. Save here because the red zone does
27690 not always include AltiVec registers. */
27691 if (!WORLD_SAVE_P (info)
27692 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27694 int end_save = info->altivec_save_offset + info->altivec_size;
27695 int ptr_off;
27696 /* Oddly, the vector save/restore functions point r0 at the end
27697 of the save area, then use r11 or r12 to load offsets for
27698 [reg+reg] addressing. */
27699 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27700 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27701 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27703 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27704 NOT_INUSE (0);
27705 if (scratch_regno == 12)
27706 sp_adjust = 0;
27707 if (end_save + frame_off != 0)
27709 rtx offset = GEN_INT (end_save + frame_off);
27711 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27713 else
27714 emit_move_insn (ptr_reg, frame_reg_rtx);
27716 ptr_off = -end_save;
27717 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27718 info->altivec_save_offset + ptr_off,
27719 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27720 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27721 NULL_RTX, NULL_RTX);
27722 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27724 /* The oddity mentioned above clobbered our frame reg. */
27725 emit_move_insn (frame_reg_rtx, ptr_reg);
27726 frame_off = ptr_off;
27729 else if (!WORLD_SAVE_P (info)
27730 && info->altivec_size != 0)
27732 int i;
27734 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27735 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27737 rtx areg, savereg, mem;
27738 HOST_WIDE_INT offset;
27740 offset = (info->altivec_save_offset + frame_off
27741 + 16 * (i - info->first_altivec_reg_save));
27743 savereg = gen_rtx_REG (V4SImode, i);
27745 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27747 mem = gen_frame_mem (V4SImode,
27748 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27749 GEN_INT (offset)));
27750 insn = emit_insn (gen_rtx_SET (mem, savereg));
27751 areg = NULL_RTX;
27753 else
27755 NOT_INUSE (0);
27756 areg = gen_rtx_REG (Pmode, 0);
27757 emit_move_insn (areg, GEN_INT (offset));
27759 /* AltiVec addressing mode is [reg+reg]. */
27760 mem = gen_frame_mem (V4SImode,
27761 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27763 /* Rather than emitting a generic move, force use of the stvx
27764 instruction, which we always want on ISA 2.07 (power8) systems.
27765 In particular we don't want xxpermdi/stxvd2x for little
27766 endian. */
27767 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27770 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27771 areg, GEN_INT (offset));
27775 /* VRSAVE is a bit vector representing which AltiVec registers
27776 are used. The OS uses this to determine which vector
27777 registers to save on a context switch. We need to save
27778 VRSAVE on the stack frame, add whatever AltiVec registers we
27779 used in this function, and do the corresponding magic in the
27780 epilogue. */
27782 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27784 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27785 be using r12 as frame_reg_rtx and r11 as the static chain
27786 pointer for nested functions. */
27787 int save_regno = 12;
27788 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27789 && !using_static_chain_p)
27790 save_regno = 11;
27791 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27793 save_regno = 11;
27794 if (using_static_chain_p)
27795 save_regno = 0;
27797 NOT_INUSE (save_regno);
27799 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27802 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27803 if (!TARGET_SINGLE_PIC_BASE
27804 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27805 && !constant_pool_empty_p ())
27806 || (DEFAULT_ABI == ABI_V4
27807 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27808 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27810 /* If emit_load_toc_table will use the link register, we need to save
27811 it. We use R12 for this purpose because emit_load_toc_table
27812 can use register 0. This allows us to use a plain 'blr' to return
27813 from the procedure more often. */
27814 int save_LR_around_toc_setup = (TARGET_ELF
27815 && DEFAULT_ABI == ABI_V4
27816 && flag_pic
27817 && ! info->lr_save_p
27818 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27819 if (save_LR_around_toc_setup)
27821 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27822 rtx tmp = gen_rtx_REG (Pmode, 12);
27824 sp_adjust = 0;
27825 insn = emit_move_insn (tmp, lr);
27826 RTX_FRAME_RELATED_P (insn) = 1;
27828 rs6000_emit_load_toc_table (TRUE);
27830 insn = emit_move_insn (lr, tmp);
27831 add_reg_note (insn, REG_CFA_RESTORE, lr);
27832 RTX_FRAME_RELATED_P (insn) = 1;
27834 else
27835 rs6000_emit_load_toc_table (TRUE);
27838 #if TARGET_MACHO
27839 if (!TARGET_SINGLE_PIC_BASE
27840 && DEFAULT_ABI == ABI_DARWIN
27841 && flag_pic && crtl->uses_pic_offset_table)
27843 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27844 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27846 /* Save and restore LR locally around this call (in R0). */
27847 if (!info->lr_save_p)
27848 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27850 emit_insn (gen_load_macho_picbase (src));
27852 emit_move_insn (gen_rtx_REG (Pmode,
27853 RS6000_PIC_OFFSET_TABLE_REGNUM),
27854 lr);
27856 if (!info->lr_save_p)
27857 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27859 #endif
27861 /* If we need to, save the TOC register after doing the stack setup.
27862 Do not emit eh frame info for this save. The unwinder wants info,
27863 conceptually attached to instructions in this function, about
27864 register values in the caller of this function. This R2 may have
27865 already been changed from the value in the caller.
27866 We don't attempt to write accurate DWARF EH frame info for R2
27867 because code emitted by gcc for a (non-pointer) function call
27868 doesn't save and restore R2. Instead, R2 is managed out-of-line
27869 by a linker generated plt call stub when the function resides in
27870 a shared library. This behavior is costly to describe in DWARF,
27871 both in terms of the size of DWARF info and the time taken in the
27872 unwinder to interpret it. R2 changes, apart from the
27873 calls_eh_return case earlier in this function, are handled by
27874 linux-unwind.h frob_update_context. */
27875 if (rs6000_save_toc_in_prologue_p ()
27876 && !cfun->machine->toc_is_wrapped_separately)
27878 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27879 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27882 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27883 if (using_split_stack && split_stack_arg_pointer_used_p ())
27884 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27887 /* Output .extern statements for the save/restore routines we use. */
27889 static void
27890 rs6000_output_savres_externs (FILE *file)
27892 rs6000_stack_t *info = rs6000_stack_info ();
27894 if (TARGET_DEBUG_STACK)
27895 debug_stack_info (info);
27897 /* Write .extern for any function we will call to save and restore
27898 fp values. */
27899 if (info->first_fp_reg_save < 64
27900 && !TARGET_MACHO
27901 && !TARGET_ELF)
27903 char *name;
27904 int regno = info->first_fp_reg_save - 32;
27906 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27908 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27909 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27910 name = rs6000_savres_routine_name (regno, sel);
27911 fprintf (file, "\t.extern %s\n", name);
27913 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27915 bool lr = (info->savres_strategy
27916 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27917 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27918 name = rs6000_savres_routine_name (regno, sel);
27919 fprintf (file, "\t.extern %s\n", name);
27924 /* Write function prologue. */
27926 static void
27927 rs6000_output_function_prologue (FILE *file)
27929 if (!cfun->is_thunk)
27930 rs6000_output_savres_externs (file);
27932 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27933 immediately after the global entry point label. */
27934 if (rs6000_global_entry_point_needed_p ())
27936 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27938 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27940 if (TARGET_CMODEL != CMODEL_LARGE)
27942 /* In the small and medium code models, we assume the TOC is less
27943 than 2 GB away from the text section, so it can be computed via the
27944 following two-instruction sequence. */
27945 char buf[256];
27947 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27948 fprintf (file, "0:\taddis 2,12,.TOC.-");
27949 assemble_name (file, buf);
27950 fprintf (file, "@ha\n");
27951 fprintf (file, "\taddi 2,2,.TOC.-");
27952 assemble_name (file, buf);
27953 fprintf (file, "@l\n");
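/* For instance, with pic label number 0 this emits
	0:	addis 2,12,.TOC.-.LCF0@ha
		addi 2,2,.TOC.-.LCF0@l
   (exact label spelling is target-dependent), computing the TOC
   pointer from the global entry address in r12.  */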
27955 else
27957 /* In the large code model, we allow arbitrary offsets between the
27958 TOC and the text section, so we have to load the offset from
27959 memory. The data field is emitted directly before the global
27960 entry point in rs6000_elf_declare_function_name. */
27961 char buf[256];
27963 #ifdef HAVE_AS_ENTRY_MARKERS
27964 /* If supported by the linker, emit a marker relocation. If the
27965 total code size of the final executable or shared library
27966 happens to fit into 2 GB after all, the linker will replace
27967 this code sequence with the sequence for the small or medium
27968 code model. */
27969 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27970 #endif
27971 fprintf (file, "\tld 2,");
27972 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27973 assemble_name (file, buf);
27974 fprintf (file, "-");
27975 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27976 assemble_name (file, buf);
27977 fprintf (file, "(12)\n");
27978 fprintf (file, "\tadd 2,2,12\n");
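/* The resulting sequence, e.g.
	ld 2,.LCL0-.LCF0(12)
	add 2,2,12
   loads the TOC offset stored just before the global entry point
   and adds the entry address held in r12.  */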
27981 fputs ("\t.localentry\t", file);
27982 assemble_name (file, name);
27983 fputs (",.-", file);
27984 assemble_name (file, name);
27985 fputs ("\n", file);
27988 /* Output -mprofile-kernel code. This needs to be done here instead of
27989 in output_function_profile since it must go after the ELFv2 ABI
27990 local entry point. */
27991 if (TARGET_PROFILE_KERNEL && crtl->profile)
27993 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27994 gcc_assert (!TARGET_32BIT);
27996 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27998 /* In the ELFv2 ABI we have no compiler stack word. It must be
27999 the responsibility of _mcount to preserve the static chain
28000 register if required. */
28001 if (DEFAULT_ABI != ABI_ELFv2
28002 && cfun->static_chain_decl != NULL)
28004 asm_fprintf (file, "\tstd %s,24(%s)\n",
28005 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
28006 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
28007 asm_fprintf (file, "\tld %s,24(%s)\n",
28008 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
28010 else
28011 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
28014 rs6000_pic_labelno++;
28017 /* -mprofile-kernel code calls mcount before the function prologue,
28018 so a profiled leaf function should stay a leaf function. */
28019 static bool
28020 rs6000_keep_leaf_when_profiled ()
28022 return TARGET_PROFILE_KERNEL;
28025 /* Non-zero if vmx regs are restored before the frame pop, zero if
28026 we restore after the pop when possible. */
28027 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
28029 /* Restoring cr is a two-step process: loading a reg from the frame
28030 save, then moving the reg to cr. For ABI_V4 we must let the
28031 unwinder know that the stack location is no longer valid at or
28032 before the stack deallocation, but we can't emit a cfa_restore for
28033 cr at the stack deallocation like we do for other registers.
28034 The trouble is that it is possible for the move to cr to be
28035 scheduled after the stack deallocation. So say exactly where cr
28036 is located on each of the two insns. */
28038 static rtx
28039 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
28041 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
28042 rtx reg = gen_rtx_REG (SImode, regno);
28043 rtx_insn *insn = emit_move_insn (reg, mem);
28045 if (!exit_func && DEFAULT_ABI == ABI_V4)
28047 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28048 rtx set = gen_rtx_SET (reg, cr);
28050 add_reg_note (insn, REG_CFA_REGISTER, set);
28051 RTX_FRAME_RELATED_P (insn) = 1;
28053 return reg;
28056 /* Reload CR from REG. */
28058 static void
28059 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
28061 int count = 0;
28062 int i;
28064 if (using_mfcr_multiple)
28066 for (i = 0; i < 8; i++)
28067 if (save_reg_p (CR0_REGNO + i))
28068 count++;
28069 gcc_assert (count);
28072 if (using_mfcr_multiple && count > 1)
28074 rtx_insn *insn;
28075 rtvec p;
28076 int ndx;
28078 p = rtvec_alloc (count);
28080 ndx = 0;
28081 for (i = 0; i < 8; i++)
28082 if (save_reg_p (CR0_REGNO + i))
28084 rtvec r = rtvec_alloc (2);
28085 RTVEC_ELT (r, 0) = reg;
28086 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
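/* The mtcrf field mask numbers CR0 as its most significant bit,
   hence 1 << (7 - i) selects CR field i.  */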
28087 RTVEC_ELT (p, ndx) =
28088 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
28089 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
28090 ndx++;
28092 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28093 gcc_assert (ndx == count);
28095 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28096 CR field separately. */
28097 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28099 for (i = 0; i < 8; i++)
28100 if (save_reg_p (CR0_REGNO + i))
28101 add_reg_note (insn, REG_CFA_RESTORE,
28102 gen_rtx_REG (SImode, CR0_REGNO + i));
28104 RTX_FRAME_RELATED_P (insn) = 1;
28107 else
28108 for (i = 0; i < 8; i++)
28109 if (save_reg_p (CR0_REGNO + i))
28111 rtx insn = emit_insn (gen_movsi_to_cr_one
28112 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28114 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28115 CR field separately, attached to the insn that in fact
28116 restores this particular CR field. */
28117 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28119 add_reg_note (insn, REG_CFA_RESTORE,
28120 gen_rtx_REG (SImode, CR0_REGNO + i));
28122 RTX_FRAME_RELATED_P (insn) = 1;
28126 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
28127 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
28128 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
28130 rtx_insn *insn = get_last_insn ();
28131 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28133 add_reg_note (insn, REG_CFA_RESTORE, cr);
28134 RTX_FRAME_RELATED_P (insn) = 1;
28138 /* Like cr, the move to lr instruction can be scheduled after the
28139 stack deallocation, but unlike cr, its stack frame save is still
28140 valid. So we only need to emit the cfa_restore on the correct
28141 instruction. */
28143 static void
28144 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
28146 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
28147 rtx reg = gen_rtx_REG (Pmode, regno);
28149 emit_move_insn (reg, mem);
28152 static void
28153 restore_saved_lr (int regno, bool exit_func)
28155 rtx reg = gen_rtx_REG (Pmode, regno);
28156 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
28157 rtx_insn *insn = emit_move_insn (lr, reg);
28159 if (!exit_func && flag_shrink_wrap)
28161 add_reg_note (insn, REG_CFA_RESTORE, lr);
28162 RTX_FRAME_RELATED_P (insn) = 1;
28166 static rtx
28167 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
28169 if (DEFAULT_ABI == ABI_ELFv2)
28171 int i;
28172 for (i = 0; i < 8; i++)
28173 if (save_reg_p (CR0_REGNO + i))
28175 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
28176 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
28177 cfa_restores);
28180 else if (info->cr_save_p)
28181 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28182 gen_rtx_REG (SImode, CR2_REGNO),
28183 cfa_restores);
28185 if (info->lr_save_p)
28186 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28187 gen_rtx_REG (Pmode, LR_REGNO),
28188 cfa_restores);
28189 return cfa_restores;
28192 /* Return true if OFFSET from the stack pointer can be clobbered by signals.
28193 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
28194 below the stack pointer that are not clobbered by signals. */
28196 static inline bool
28197 offset_below_red_zone_p (HOST_WIDE_INT offset)
28199 return offset < (DEFAULT_ABI == ABI_V4
28200 ? 0
28201 : TARGET_32BIT ? -220 : -288);
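/* For example, with the 64-bit 288-byte cushion an offset of -300
   may be clobbered by a signal handler, while -288 and anything
   above it is safe.  */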
28204 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
28206 static void
28207 emit_cfa_restores (rtx cfa_restores)
28209 rtx_insn *insn = get_last_insn ();
28210 rtx *loc = &REG_NOTES (insn);
28212 while (*loc)
28213 loc = &XEXP (*loc, 1);
28214 *loc = cfa_restores;
28215 RTX_FRAME_RELATED_P (insn) = 1;
28218 /* Emit function epilogue as insns. */
28220 void
28221 rs6000_emit_epilogue (int sibcall)
28223 rs6000_stack_t *info;
28224 int restoring_GPRs_inline;
28225 int restoring_FPRs_inline;
28226 int using_load_multiple;
28227 int using_mtcr_multiple;
28228 int use_backchain_to_restore_sp;
28229 int restore_lr;
28230 int strategy;
28231 HOST_WIDE_INT frame_off = 0;
28232 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
28233 rtx frame_reg_rtx = sp_reg_rtx;
28234 rtx cfa_restores = NULL_RTX;
28235 rtx insn;
28236 rtx cr_save_reg = NULL_RTX;
28237 machine_mode reg_mode = Pmode;
28238 int reg_size = TARGET_32BIT ? 4 : 8;
28239 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
28240 ? DFmode : SFmode;
28241 int fp_reg_size = 8;
28242 int i;
28243 bool exit_func;
28244 unsigned ptr_regno;
28246 info = rs6000_stack_info ();
28248 strategy = info->savres_strategy;
28249 using_load_multiple = strategy & REST_MULTIPLE;
28250 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
28251 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
28252 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
28253 || rs6000_cpu == PROCESSOR_PPC603
28254 || rs6000_cpu == PROCESSOR_PPC750
28255 || optimize_size);
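/* A single mtcrf updating all fields at once is smaller and is fine
   on these older processors; on later cores, where CR fields are
   renamed individually, one mtcrf per field tends to be faster.  */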
28256 /* Restore via the backchain when we have a large frame, since this
28257 is more efficient than an addis, addi pair. The second condition
28258 here will not trigger at the moment; we don't actually need a
28259 frame pointer for alloca, but the generic parts of the compiler
28260 give us one anyway. */
28261 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
28262 ? info->lr_save_offset
28263 : 0) > 32767
28264 || (cfun->calls_alloca
28265 && !frame_pointer_needed));
28266 restore_lr = (info->lr_save_p
28267 && (restoring_FPRs_inline
28268 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
28269 && (restoring_GPRs_inline
28270 || info->first_fp_reg_save < 64)
28271 && !cfun->machine->lr_is_wrapped_separately);
28274 if (WORLD_SAVE_P (info))
28276 int i, j;
28277 char rname[30];
28278 const char *alloc_rname;
28279 rtvec p;
28281 /* eh_rest_world_r10 will return to the location saved in the LR
28282 stack slot (which is not likely to be our caller).
28283 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
28284 rest_world is similar, except any R10 parameter is ignored.
28285 The exception-handling stuff that was here in 2.95 is no
28286 longer necessary. */
28288 p = rtvec_alloc (9
28289 + 32 - info->first_gp_reg_save
28290 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
28291 + 63 + 1 - info->first_fp_reg_save);
28293 strcpy (rname, ((crtl->calls_eh_return) ?
28294 "*eh_rest_world_r10" : "*rest_world"));
28295 alloc_rname = ggc_strdup (rname);
28297 j = 0;
28298 RTVEC_ELT (p, j++) = ret_rtx;
28299 RTVEC_ELT (p, j++)
28300 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
28301 /* The instruction pattern requires a clobber here;
28302 it is shared with the restVEC helper. */
28303 RTVEC_ELT (p, j++)
28304 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
28307 /* CR register traditionally saved as CR2. */
28308 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
28309 RTVEC_ELT (p, j++)
28310 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28311 if (flag_shrink_wrap)
28313 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28314 gen_rtx_REG (Pmode, LR_REGNO),
28315 cfa_restores);
28316 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28320 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28322 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28323 RTVEC_ELT (p, j++)
28324 = gen_frame_load (reg,
28325 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28326 if (flag_shrink_wrap
28327 && save_reg_p (info->first_gp_reg_save + i))
28328 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28330 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28332 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28333 RTVEC_ELT (p, j++)
28334 = gen_frame_load (reg,
28335 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28336 if (flag_shrink_wrap
28337 && save_reg_p (info->first_altivec_reg_save + i))
28338 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28340 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28342 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
28343 ? DFmode : SFmode),
28344 info->first_fp_reg_save + i);
28345 RTVEC_ELT (p, j++)
28346 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28347 if (flag_shrink_wrap
28348 && save_reg_p (info->first_fp_reg_save + i))
28349 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28351 RTVEC_ELT (p, j++)
28352 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
28353 RTVEC_ELT (p, j++)
28354 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
28355 RTVEC_ELT (p, j++)
28356 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
28357 RTVEC_ELT (p, j++)
28358 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
28359 RTVEC_ELT (p, j++)
28360 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28361 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28363 if (flag_shrink_wrap)
28365 REG_NOTES (insn) = cfa_restores;
28366 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28367 RTX_FRAME_RELATED_P (insn) = 1;
28369 return;
28372 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28373 if (info->push_p)
28374 frame_off = info->total_size;
28376 /* Restore AltiVec registers if we must do so before adjusting the
28377 stack. */
28378 if (info->altivec_size != 0
28379 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28380 || (DEFAULT_ABI != ABI_V4
28381 && offset_below_red_zone_p (info->altivec_save_offset))))
28383 int i;
28384 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28386 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28387 if (use_backchain_to_restore_sp)
28389 int frame_regno = 11;
28391 if ((strategy & REST_INLINE_VRS) == 0)
28393 /* For the frame register, select whichever of r11 and r12 is
28394 not clobbered by the out-of-line restore function. */
28395 frame_regno = 11 + 12 - scratch_regno;
28397 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28398 emit_move_insn (frame_reg_rtx,
28399 gen_rtx_MEM (Pmode, sp_reg_rtx));
28400 frame_off = 0;
28402 else if (frame_pointer_needed)
28403 frame_reg_rtx = hard_frame_pointer_rtx;
28405 if ((strategy & REST_INLINE_VRS) == 0)
28407 int end_save = info->altivec_save_offset + info->altivec_size;
28408 int ptr_off;
28409 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28410 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28412 if (end_save + frame_off != 0)
28414 rtx offset = GEN_INT (end_save + frame_off);
28416 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28418 else
28419 emit_move_insn (ptr_reg, frame_reg_rtx);
28421 ptr_off = -end_save;
28422 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28423 info->altivec_save_offset + ptr_off,
28424 0, V4SImode, SAVRES_VR);
28426 else
28428 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28429 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28431 rtx addr, areg, mem, insn;
28432 rtx reg = gen_rtx_REG (V4SImode, i);
28433 HOST_WIDE_INT offset
28434 = (info->altivec_save_offset + frame_off
28435 + 16 * (i - info->first_altivec_reg_save));
28437 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28439 mem = gen_frame_mem (V4SImode,
28440 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28441 GEN_INT (offset)));
28442 insn = gen_rtx_SET (reg, mem);
28444 else
28446 areg = gen_rtx_REG (Pmode, 0);
28447 emit_move_insn (areg, GEN_INT (offset));
28449 /* AltiVec addressing mode is [reg+reg]. */
28450 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28451 mem = gen_frame_mem (V4SImode, addr);
28453 /* Rather than emitting a generic move, force use of the
28454 lvx instruction, which we always want. In particular we
28455 don't want lxvd2x/xxpermdi for little endian. */
28456 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28459 (void) emit_insn (insn);
28463 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28464 if (((strategy & REST_INLINE_VRS) == 0
28465 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28466 && (flag_shrink_wrap
28467 || (offset_below_red_zone_p
28468 (info->altivec_save_offset
28469 + 16 * (i - info->first_altivec_reg_save))))
28470 && save_reg_p (i))
28472 rtx reg = gen_rtx_REG (V4SImode, i);
28473 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28477 /* Restore VRSAVE if we must do so before adjusting the stack. */
28478 if (info->vrsave_size != 0
28479 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28480 || (DEFAULT_ABI != ABI_V4
28481 && offset_below_red_zone_p (info->vrsave_save_offset))))
28483 rtx reg;
28485 if (frame_reg_rtx == sp_reg_rtx)
28487 if (use_backchain_to_restore_sp)
28489 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28490 emit_move_insn (frame_reg_rtx,
28491 gen_rtx_MEM (Pmode, sp_reg_rtx));
28492 frame_off = 0;
28494 else if (frame_pointer_needed)
28495 frame_reg_rtx = hard_frame_pointer_rtx;
28498 reg = gen_rtx_REG (SImode, 12);
28499 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28500 info->vrsave_save_offset + frame_off));
28502 emit_insn (generate_set_vrsave (reg, info, 1));
28505 insn = NULL_RTX;
28506 /* If we have a large stack frame, restore the old stack pointer
28507 using the backchain. */
28508 if (use_backchain_to_restore_sp)
28510 if (frame_reg_rtx == sp_reg_rtx)
28512 /* Under V.4, don't reset the stack pointer until after we're done
28513 loading the saved registers. */
28514 if (DEFAULT_ABI == ABI_V4)
28515 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28517 insn = emit_move_insn (frame_reg_rtx,
28518 gen_rtx_MEM (Pmode, sp_reg_rtx));
28519 frame_off = 0;
28521 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28522 && DEFAULT_ABI == ABI_V4)
28523 /* frame_reg_rtx has been set up by the altivec restore. */
28525 else
28527 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28528 frame_reg_rtx = sp_reg_rtx;
28531 /* If we have a frame pointer, we can restore the old stack pointer
28532 from it. */
28533 else if (frame_pointer_needed)
28535 frame_reg_rtx = sp_reg_rtx;
28536 if (DEFAULT_ABI == ABI_V4)
28537 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28538 /* Prevent reordering memory accesses against stack pointer restore. */
28539 else if (cfun->calls_alloca
28540 || offset_below_red_zone_p (-info->total_size))
28541 rs6000_emit_stack_tie (frame_reg_rtx, true);
28543 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28544 GEN_INT (info->total_size)));
28545 frame_off = 0;
28547 else if (info->push_p
28548 && DEFAULT_ABI != ABI_V4
28549 && !crtl->calls_eh_return)
28551 /* Prevent reordering memory accesses against stack pointer restore. */
28552 if (cfun->calls_alloca
28553 || offset_below_red_zone_p (-info->total_size))
28554 rs6000_emit_stack_tie (frame_reg_rtx, false);
28555 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28556 GEN_INT (info->total_size)));
28557 frame_off = 0;
28559 if (insn && frame_reg_rtx == sp_reg_rtx)
28561 if (cfa_restores)
28563 REG_NOTES (insn) = cfa_restores;
28564 cfa_restores = NULL_RTX;
28566 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28567 RTX_FRAME_RELATED_P (insn) = 1;
28570 /* Restore AltiVec registers if we have not done so already. */
28571 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28572 && info->altivec_size != 0
28573 && (DEFAULT_ABI == ABI_V4
28574 || !offset_below_red_zone_p (info->altivec_save_offset)))
28576 int i;
28578 if ((strategy & REST_INLINE_VRS) == 0)
28580 int end_save = info->altivec_save_offset + info->altivec_size;
28581 int ptr_off;
28582 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28583 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28584 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28586 if (end_save + frame_off != 0)
28588 rtx offset = GEN_INT (end_save + frame_off);
28590 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28592 else
28593 emit_move_insn (ptr_reg, frame_reg_rtx);
28595 ptr_off = -end_save;
28596 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28597 info->altivec_save_offset + ptr_off,
28598 0, V4SImode, SAVRES_VR);
28599 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28601 /* Frame reg was clobbered by out-of-line save. Restore it
28602 from ptr_reg, and if we are calling out-of-line gpr or
28603 fpr restore, set up the correct pointer and offset. */
28604 unsigned newptr_regno = 1;
28605 if (!restoring_GPRs_inline)
28607 bool lr = info->gp_save_offset + info->gp_size == 0;
28608 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28609 newptr_regno = ptr_regno_for_savres (sel);
28610 end_save = info->gp_save_offset + info->gp_size;
28612 else if (!restoring_FPRs_inline)
28614 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28615 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28616 newptr_regno = ptr_regno_for_savres (sel);
28617 end_save = info->fp_save_offset + info->fp_size;
28620 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28621 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28623 if (end_save + ptr_off != 0)
28625 rtx offset = GEN_INT (end_save + ptr_off);
28627 frame_off = -end_save;
28628 if (TARGET_32BIT)
28629 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28630 ptr_reg, offset));
28631 else
28632 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28633 ptr_reg, offset));
28635 else
28637 frame_off = ptr_off;
28638 emit_move_insn (frame_reg_rtx, ptr_reg);
28642 else
28644 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28645 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28647 rtx addr, areg, mem, insn;
28648 rtx reg = gen_rtx_REG (V4SImode, i);
28649 HOST_WIDE_INT offset
28650 = (info->altivec_save_offset + frame_off
28651 + 16 * (i - info->first_altivec_reg_save));
28653 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28655 mem = gen_frame_mem (V4SImode,
28656 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28657 GEN_INT (offset)));
28658 insn = gen_rtx_SET (reg, mem);
28660 else
28662 areg = gen_rtx_REG (Pmode, 0);
28663 emit_move_insn (areg, GEN_INT (offset));
28665 /* AltiVec addressing mode is [reg+reg]. */
28666 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28667 mem = gen_frame_mem (V4SImode, addr);
28669 /* Rather than emitting a generic move, force use of the
28670 lvx instruction, which we always want. In particular we
28671 don't want lxvd2x/xxpermdi for little endian. */
28672 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28675 (void) emit_insn (insn);
28679 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28680 if (((strategy & REST_INLINE_VRS) == 0
28681 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28682 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28683 && save_reg_p (i))
28685 rtx reg = gen_rtx_REG (V4SImode, i);
28686 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28690 /* Restore VRSAVE if we have not done so already. */
28691 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28692 && info->vrsave_size != 0
28693 && (DEFAULT_ABI == ABI_V4
28694 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28696 rtx reg;
28698 reg = gen_rtx_REG (SImode, 12);
28699 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28700 info->vrsave_save_offset + frame_off));
28702 emit_insn (generate_set_vrsave (reg, info, 1));
28705 /* If we exit by an out-of-line restore function on ABI_V4 then that
28706 function will deallocate the stack, so we don't need to worry
28707 about the unwinder restoring cr from an invalid stack frame
28708 location. */
28709 exit_func = (!restoring_FPRs_inline
28710 || (!restoring_GPRs_inline
28711 && info->first_fp_reg_save == 64));
28713 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28714 *separate* slots if the routine calls __builtin_eh_return, so
28715 that they can be independently restored by the unwinder. */
28716 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28718 int i, cr_off = info->ehcr_offset;
28720 for (i = 0; i < 8; i++)
28721 if (!call_used_regs[CR0_REGNO + i])
28723 rtx reg = gen_rtx_REG (SImode, 0);
28724 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28725 cr_off + frame_off));
28727 insn = emit_insn (gen_movsi_to_cr_one
28728 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28730 if (!exit_func && flag_shrink_wrap)
28732 add_reg_note (insn, REG_CFA_RESTORE,
28733 gen_rtx_REG (SImode, CR0_REGNO + i));
28735 RTX_FRAME_RELATED_P (insn) = 1;
28738 cr_off += reg_size;
28742 /* Get the old lr if we saved it. If we are restoring registers
28743 out-of-line, then the out-of-line routines can do this for us. */
28744 if (restore_lr && restoring_GPRs_inline)
28745 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28747 /* Get the old cr if we saved it. */
28748 if (info->cr_save_p)
28750 unsigned cr_save_regno = 12;
28752 if (!restoring_GPRs_inline)
28754 /* Ensure we don't use the register used by the out-of-line
28755 gpr restore below. */
28756 bool lr = info->gp_save_offset + info->gp_size == 0;
28757 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28758 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28760 if (gpr_ptr_regno == 12)
28761 cr_save_regno = 11;
28762 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28764 else if (REGNO (frame_reg_rtx) == 12)
28765 cr_save_regno = 11;
28767 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28768 info->cr_save_offset + frame_off,
28769 exit_func);
28772 /* Set LR here to try to overlap restores below. */
28773 if (restore_lr && restoring_GPRs_inline)
28774 restore_saved_lr (0, exit_func);
28776 /* Load exception handler data registers, if needed. */
28777 if (crtl->calls_eh_return)
28779 unsigned int i, regno;
28781 if (TARGET_AIX)
28783 rtx reg = gen_rtx_REG (reg_mode, 2);
28784 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28785 frame_off + RS6000_TOC_SAVE_SLOT));
28788 for (i = 0; ; ++i)
28790 rtx mem;
28792 regno = EH_RETURN_DATA_REGNO (i);
28793 if (regno == INVALID_REGNUM)
28794 break;
28796 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28797 info->ehrd_offset + frame_off
28798 + reg_size * (int) i);
28800 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28804 /* Restore GPRs. This is done as a PARALLEL if we are using
28805 the load-multiple instructions. */
28806 if (!restoring_GPRs_inline)
28808 /* We are jumping to an out-of-line function. */
28809 rtx ptr_reg;
28810 int end_save = info->gp_save_offset + info->gp_size;
28811 bool can_use_exit = end_save == 0;
28812 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28813 int ptr_off;
28815 /* Emit stack reset code if we need it. */
28816 ptr_regno = ptr_regno_for_savres (sel);
28817 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28818 if (can_use_exit)
28819 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28820 else if (end_save + frame_off != 0)
28821 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28822 GEN_INT (end_save + frame_off)));
28823 else if (REGNO (frame_reg_rtx) != ptr_regno)
28824 emit_move_insn (ptr_reg, frame_reg_rtx);
28825 if (REGNO (frame_reg_rtx) == ptr_regno)
28826 frame_off = -end_save;
28828 if (can_use_exit && info->cr_save_p)
28829 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28831 ptr_off = -end_save;
28832 rs6000_emit_savres_rtx (info, ptr_reg,
28833 info->gp_save_offset + ptr_off,
28834 info->lr_save_offset + ptr_off,
28835 reg_mode, sel);
28837 else if (using_load_multiple)
28839 rtvec p;
28840 p = rtvec_alloc (32 - info->first_gp_reg_save);
28841 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28842 RTVEC_ELT (p, i)
28843 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28844 frame_reg_rtx,
28845 info->gp_save_offset + frame_off + reg_size * i);
28846 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28848 else
28850 int offset = info->gp_save_offset + frame_off;
28851 for (i = info->first_gp_reg_save; i < 32; i++)
28853 if (save_reg_p (i)
28854 && !cfun->machine->gpr_is_wrapped_separately[i])
28856 rtx reg = gen_rtx_REG (reg_mode, i);
28857 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28860 offset += reg_size;
28864 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28866 /* If the frame pointer was used then we can't delay emitting
28867 a REG_CFA_DEF_CFA note. This must happen on the insn that
28868 restores the frame pointer, r31. We may have already emitted
28869 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28870 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28871 be harmless if emitted. */
28872 if (frame_pointer_needed)
28874 insn = get_last_insn ();
28875 add_reg_note (insn, REG_CFA_DEF_CFA,
28876 plus_constant (Pmode, frame_reg_rtx, frame_off));
28877 RTX_FRAME_RELATED_P (insn) = 1;
28880 /* Set up cfa_restores. We always need these when
28881 shrink-wrapping. If not shrink-wrapping then we only need
28882 the cfa_restore when the stack location is no longer valid.
28883 The cfa_restores must be emitted on or before the insn that
28884 invalidates the stack, and of course must not be emitted
28885 before the insn that actually does the restore. The latter
28886 is why it is a bad idea to emit the cfa_restores as a group
28887 on the last instruction here that actually does a restore:
28888 That insn may be reordered with respect to others doing
28889 restores. */
28890 if (flag_shrink_wrap
28891 && !restoring_GPRs_inline
28892 && info->first_fp_reg_save == 64)
28893 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28895 for (i = info->first_gp_reg_save; i < 32; i++)
28896 if (save_reg_p (i)
28897 && !cfun->machine->gpr_is_wrapped_separately[i])
28899 rtx reg = gen_rtx_REG (reg_mode, i);
28900 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28904 if (!restoring_GPRs_inline
28905 && info->first_fp_reg_save == 64)
28907 /* We are jumping to an out-of-line function. */
28908 if (cfa_restores)
28909 emit_cfa_restores (cfa_restores);
28910 return;
28913 if (restore_lr && !restoring_GPRs_inline)
28915 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28916 restore_saved_lr (0, exit_func);
28919 /* Restore fpr's if we need to do it without calling a function. */
28920 if (restoring_FPRs_inline)
28922 int offset = info->fp_save_offset + frame_off;
28923 for (i = info->first_fp_reg_save; i < 64; i++)
28925 if (save_reg_p (i)
28926 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28928 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28929 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28930 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28931 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28932 cfa_restores);
28935 offset += fp_reg_size;
28939 /* If we saved cr, restore it here. Just those that were used. */
28940 if (info->cr_save_p)
28941 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28943 /* If this is V.4, unwind the stack pointer after all of the loads
28944 have been done, or set up r11 if we are restoring fp out of line. */
28945 ptr_regno = 1;
28946 if (!restoring_FPRs_inline)
28948 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28949 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28950 ptr_regno = ptr_regno_for_savres (sel);
28953 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28954 if (REGNO (frame_reg_rtx) == ptr_regno)
28955 frame_off = 0;
28957 if (insn && restoring_FPRs_inline)
28959 if (cfa_restores)
28961 REG_NOTES (insn) = cfa_restores;
28962 cfa_restores = NULL_RTX;
28964 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28965 RTX_FRAME_RELATED_P (insn) = 1;
28968 if (crtl->calls_eh_return)
28970 rtx sa = EH_RETURN_STACKADJ_RTX;
28971 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28974 if (!sibcall && restoring_FPRs_inline)
28976 if (cfa_restores)
28978 /* We can't hang the cfa_restores off a simple return,
28979 since the shrink-wrap code sometimes uses an existing
28980 return. This means there might be a path from
28981 pre-prologue code to this return, and dwarf2cfi code
28982 wants the eh_frame unwinder state to be the same on
28983 all paths to any point. So we need to emit the
28984 cfa_restores before the return. For -m64 we really
28985 don't need epilogue cfa_restores at all, except for
28986 this irritating dwarf2cfi-with-shrink-wrap
28987 requirement; the stack red-zone means eh_frame info
28988 from the prologue telling the unwinder to restore
28989 from the stack is perfectly good right to the end of
28990 the function. */
28991 emit_insn (gen_blockage ());
28992 emit_cfa_restores (cfa_restores);
28993 cfa_restores = NULL_RTX;
28996 emit_jump_insn (targetm.gen_simple_return ());
28999 if (!sibcall && !restoring_FPRs_inline)
29001 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
29002 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
29003 int elt = 0;
29004 RTVEC_ELT (p, elt++) = ret_rtx;
29005 if (lr)
29006 RTVEC_ELT (p, elt++)
29007 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
29009 /* We have to restore more than two FP registers, so branch to the
29010 restore function. It will return to our caller. */
29011 int i;
29012 int reg;
29013 rtx sym;
29015 if (flag_shrink_wrap)
29016 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
29018 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
29019 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
29020 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
29021 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
29023 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
29025 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
29027 RTVEC_ELT (p, elt++)
29028 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
29029 if (flag_shrink_wrap
29030 && save_reg_p (info->first_fp_reg_save + i))
29031 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
29034 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
29037 if (cfa_restores)
29039 if (sibcall)
29040 /* Ensure the cfa_restores are hung off an insn that won't
29041 be reordered above other restores. */
29042 emit_insn (gen_blockage ());
29044 emit_cfa_restores (cfa_restores);
29048 /* Write function epilogue. */
29050 static void
29051 rs6000_output_function_epilogue (FILE *file)
29053 #if TARGET_MACHO
29054 macho_branch_islands ();
29057 rtx_insn *insn = get_last_insn ();
29058 rtx_insn *deleted_debug_label = NULL;
29060 /* Mach-O doesn't support labels at the end of objects, so if
29061 it looks like we might want one, take special action.
29063 First, collect any sequence of deleted debug labels. */
29064 while (insn
29065 && NOTE_P (insn)
29066 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
29068 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
29069 notes only, instead set their CODE_LABEL_NUMBER to -1,
29070 otherwise there would be code generation differences
29071 in between -g and -g0. */
29072 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29073 deleted_debug_label = insn;
29074 insn = PREV_INSN (insn);
29077 /* Second, if we have:
29078 label:
29079 barrier
29080 then this needs to be detected, so skip past the barrier. */
29082 if (insn && BARRIER_P (insn))
29083 insn = PREV_INSN (insn);
29085 /* Up to now we've only seen notes or barriers. */
29086 if (insn)
29088 if (LABEL_P (insn)
29089 || (NOTE_P (insn)
29090 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
29091 /* Trailing label: <barrier>. */
29092 fputs ("\tnop\n", file);
29093 else
29095 /* Lastly, see if we have a completely empty function body. */
29096 while (insn && ! INSN_P (insn))
29097 insn = PREV_INSN (insn);
29098 /* If we don't find any insns, we've got an empty function body,
29099 i.e. completely empty - without a return or branch. This is
29100 taken as the case where a function body has been removed
29101 because it contains an inline __builtin_unreachable(). GCC
29102 states that reaching __builtin_unreachable() means UB, so we're
29103 not obliged to do anything special; however, we want
29104 non-zero-sized function bodies. To meet this, and to help the
29105 user out, let's trap the case. */
29106 if (insn == NULL)
29107 fputs ("\ttrap\n", file);
29110 else if (deleted_debug_label)
29111 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
29112 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29113 CODE_LABEL_NUMBER (insn) = -1;
29115 #endif
29117 /* Output a traceback table here. See /usr/include/sys/debug.h for info
29118 on its format.
29120 We don't output a traceback table if -finhibit-size-directive was
29121 used. The documentation for -finhibit-size-directive reads
29122 ``don't output a @code{.size} assembler directive, or anything
29123 else that would cause trouble if the function is split in the
29124 middle, and the two halves are placed at locations far apart in
29125 memory.'' The traceback table has this property, since it
29126 includes the offset from the start of the function to the
29127 traceback table itself.
29129 System V.4 PowerPC (and the embedded ABI derived from it) uses a
29130 different traceback table. */
29131 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29132 && ! flag_inhibit_size_directive
29133 && rs6000_traceback != traceback_none && !cfun->is_thunk)
29135 const char *fname = NULL;
29136 const char *language_string = lang_hooks.name;
29137 int fixed_parms = 0, float_parms = 0, parm_info = 0;
29138 int i;
29139 int optional_tbtab;
29140 rs6000_stack_t *info = rs6000_stack_info ();
29142 if (rs6000_traceback == traceback_full)
29143 optional_tbtab = 1;
29144 else if (rs6000_traceback == traceback_part)
29145 optional_tbtab = 0;
29146 else
29147 optional_tbtab = !optimize_size && !TARGET_ELF;
29149 if (optional_tbtab)
29151 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
29152 while (*fname == '.') /* V.4 encodes . in the name */
29153 fname++;
29155 /* Need label immediately before tbtab, so we can compute
29156 its offset from the function start. */
29157 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29158 ASM_OUTPUT_LABEL (file, fname);
29161 /* The .tbtab pseudo-op can only be used for the first eight
29162 expressions, since it can't handle the possibly variable
29163 length fields that follow. However, if you omit the optional
29164 fields, the assembler outputs zeros for all optional fields
29165 anyway, giving each variable length field its minimum length
29166 (as defined in sys/debug.h). Thus we cannot use the .tbtab
29167 pseudo-op at all. */
29169 /* An all-zero word flags the start of the tbtab, for debuggers
29170 that have to find it by searching forward from the entry
29171 point or from the current pc. */
29172 fputs ("\t.long 0\n", file);
29174 /* Tbtab format type. Use format type 0. */
29175 fputs ("\t.byte 0,", file);
29177 /* Language type. Unfortunately, there does not seem to be any
29178 official way to discover the language being compiled, so we
29179 use language_string.
29180 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
29181 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
29182 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
29183 either, so for now use 0. */
29184 if (lang_GNU_C ()
29185 || ! strcmp (language_string, "GNU GIMPLE")
29186 || ! strcmp (language_string, "GNU Go")
29187 || ! strcmp (language_string, "libgccjit"))
29188 i = 0;
29189 else if (! strcmp (language_string, "GNU F77")
29190 || lang_GNU_Fortran ())
29191 i = 1;
29192 else if (! strcmp (language_string, "GNU Pascal"))
29193 i = 2;
29194 else if (! strcmp (language_string, "GNU Ada"))
29195 i = 3;
29196 else if (lang_GNU_CXX ()
29197 || ! strcmp (language_string, "GNU Objective-C++"))
29198 i = 9;
29199 else if (! strcmp (language_string, "GNU Java"))
29200 i = 13;
29201 else if (! strcmp (language_string, "GNU Objective-C"))
29202 i = 14;
29203 else
29204 gcc_unreachable ();
29205 fprintf (file, "%d,", i);
29207 /* 8 single bit fields: global linkage (not set for C extern linkage,
29208 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
29209 from start of procedure stored in tbtab, internal function, function
29210 has controlled storage, function has no toc, function uses fp,
29211 function logs/aborts fp operations. */
29212 /* Assume that fp operations are used if any fp reg must be saved. */
29213 fprintf (file, "%d,",
29214 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
29216 /* 6 bitfields: function is interrupt handler, name present in
29217 proc table, function calls alloca, on condition directives
29218 (controls stack walks, 3 bits), saves condition reg, saves
29219 link reg. */
29220 /* The `function calls alloca' bit seems to be set whenever reg 31 is
29221 set up as a frame pointer, even when there is no alloca call. */
29222 fprintf (file, "%d,",
29223 ((optional_tbtab << 6)
29224 | ((optional_tbtab & frame_pointer_needed) << 5)
29225 | (info->cr_save_p << 1)
29226 | (info->lr_save_p)));
29228 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
29229 (6 bits). */
29230 fprintf (file, "%d,",
29231 (info->push_p << 7) | (64 - info->first_fp_reg_save));
29233 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
29234 fprintf (file, "%d,", (32 - first_reg_to_save ()));
29236 if (optional_tbtab)
29238 /* Compute the parameter info from the function decl argument
29239 list. */
29240 tree decl;
29241 int next_parm_info_bit = 31;
29243 for (decl = DECL_ARGUMENTS (current_function_decl);
29244 decl; decl = DECL_CHAIN (decl))
29246 rtx parameter = DECL_INCOMING_RTL (decl);
29247 machine_mode mode = GET_MODE (parameter);
29249 if (GET_CODE (parameter) == REG)
29251 if (SCALAR_FLOAT_MODE_P (mode))
29253 int bits;
29255 float_parms++;
29257 switch (mode)
29259 case E_SFmode:
29260 case E_SDmode:
29261 bits = 0x2;
29262 break;
29264 case E_DFmode:
29265 case E_DDmode:
29266 case E_TFmode:
29267 case E_TDmode:
29268 case E_IFmode:
29269 case E_KFmode:
29270 bits = 0x3;
29271 break;
29273 default:
29274 gcc_unreachable ();
29277 /* If only one bit will fit, don't or in this entry. */
29278 if (next_parm_info_bit > 0)
29279 parm_info |= (bits << (next_parm_info_bit - 1));
29280 next_parm_info_bit -= 2;
29282 else
29284 fixed_parms += ((GET_MODE_SIZE (mode)
29285 + (UNITS_PER_WORD - 1))
29286 / UNITS_PER_WORD);
29287 next_parm_info_bit -= 1;
29293 /* Number of fixed point parameters. */
29294 /* This is actually the number of words of fixed point parameters; thus
29295 an 8 byte struct counts as 2; and thus the maximum value is 8. */
29296 fprintf (file, "%d,", fixed_parms);
29298 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29299 all on stack. */
29300 /* This is actually the number of fp registers that hold parameters;
29301 and thus the maximum value is 13. */
29302 /* Set parameters on stack bit if parameters are not in their original
29303 registers, regardless of whether they are on the stack? Xlc
29304 seems to set the bit when not optimizing. */
29305 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
29307 if (optional_tbtab)
29309 /* Optional fields follow. Some are variable length. */
29311 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29312 float, 11 double float. */
29313 /* There is an entry for each parameter in a register, in the order
29314 that they occur in the parameter list. Any intervening arguments
29315 on the stack are ignored. If the list overflows a long (max
29316 possible length 34 bits) then completely leave off all elements
29317 that don't fit. */
29318 /* Only emit this long if there was at least one parameter. */
29319 if (fixed_parms || float_parms)
29320 fprintf (file, "\t.long %d\n", parm_info);
29322 /* Offset from start of code to tb table. */
29323 fputs ("\t.long ", file);
29324 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29325 RS6000_OUTPUT_BASENAME (file, fname);
29326 putc ('-', file);
29327 rs6000_output_function_entry (file, fname);
29328 putc ('\n', file);
29330 /* Interrupt handler mask. */
29331 /* Omit this long, since we never set the interrupt handler bit
29332 above. */
29334 /* Number of CTL (controlled storage) anchors. */
29335 /* Omit this long, since the has_ctl bit is never set above. */
29337 /* Displacement into stack of each CTL anchor. */
29338 /* Omit this list of longs, because there are no CTL anchors. */
29340 /* Length of function name. */
29341 if (*fname == '*')
29342 ++fname;
29343 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29345 /* Function name. */
29346 assemble_string (fname, strlen (fname));
29348 /* Register for alloca automatic storage; this is always reg 31.
29349 Only emit this if the alloca bit was set above. */
29350 if (frame_pointer_needed)
29351 fputs ("\t.byte 31\n", file);
29353 fputs ("\t.align 2\n", file);
29357 /* Arrange to define .LCTOC1 label, if not already done. */
29358 if (need_toc_init)
29360 need_toc_init = 0;
29361 if (!toc_initialized)
29363 switch_to_section (toc_section);
29364 switch_to_section (current_function_section ());
29369 /* -fsplit-stack support. */
29371 /* A SYMBOL_REF for __morestack. */
29372 static GTY(()) rtx morestack_ref;
29374 static rtx
29375 gen_add3_const (rtx rt, rtx ra, long c)
29377 if (TARGET_64BIT)
29378 return gen_adddi3 (rt, ra, GEN_INT (c));
29379 else
29380 return gen_addsi3 (rt, ra, GEN_INT (c));
29383 /* Emit -fsplit-stack prologue, which goes before the regular function
29384 prologue (at local entry point in the case of ELFv2). */
29386 void
29387 rs6000_expand_split_stack_prologue (void)
29389 rs6000_stack_t *info = rs6000_stack_info ();
29390 unsigned HOST_WIDE_INT allocate;
29391 long alloc_hi, alloc_lo;
29392 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29393 rtx_insn *insn;
29395 gcc_assert (flag_split_stack && reload_completed);
29397 if (!info->push_p)
29398 return;
29400 if (global_regs[29])
29402 error ("%qs uses register r29", "-fsplit-stack");
29403 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29404 "conflicts with %qD", global_regs_decl[29]);
29407 allocate = info->total_size;
29408 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29410 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
29411 return;
29413 if (morestack_ref == NULL_RTX)
29415 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29416 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29417 | SYMBOL_FLAG_FUNCTION);
29420 r0 = gen_rtx_REG (Pmode, 0);
29421 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29422 r12 = gen_rtx_REG (Pmode, 12);
29423 emit_insn (gen_load_split_stack_limit (r0));
29424 /* Always emit two insns here to calculate the requested stack,
29425 so that the linker can edit them when adjusting size for calling
29426 non-split-stack code. */
29427 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29428 alloc_lo = -allocate - alloc_hi;
29429 if (alloc_hi != 0)
29431 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29432 if (alloc_lo != 0)
29433 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29434 else
29435 emit_insn (gen_nop ());
29437 else
29439 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29440 emit_insn (gen_nop ());
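/* Worked example (values assumed for illustration): if allocate is
   0x12345 (74565 bytes), then
       alloc_hi = (-0x12345 + 0x8000) & ~0xffffL = -0x10000
       alloc_lo = -0x12345 - (-0x10000)          = -0x2345
   so alloc_hi + alloc_lo == -allocate and alloc_lo fits in a signed
   16-bit immediate.  The two adds then come out as something like
       addis 12,1,-1
       addi  12,12,-9029
   giving the linker a fixed two-insn pattern to edit.  */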
29443 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29444 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29445 ok_label = gen_label_rtx ();
29446 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29447 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29448 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29449 pc_rtx);
29450 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29451 JUMP_LABEL (insn) = ok_label;
29452 /* Mark the jump as very likely to be taken. */
29453 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29455 lr = gen_rtx_REG (Pmode, LR_REGNO);
29456 insn = emit_move_insn (r0, lr);
29457 RTX_FRAME_RELATED_P (insn) = 1;
29458 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29459 RTX_FRAME_RELATED_P (insn) = 1;
29461 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29462 const0_rtx, const0_rtx));
29463 call_fusage = NULL_RTX;
29464 use_reg (&call_fusage, r12);
29465 /* Say the call uses r0, even though it doesn't, to stop regrename
29466 from twiddling with the insns saving lr, trashing args for cfun.
29467 The insns restoring lr are similarly protected by making
29468 split_stack_return use r0. */
29469 use_reg (&call_fusage, r0);
29470 add_function_usage_to (insn, call_fusage);
29471 /* Indicate that this function can't jump to non-local gotos. */
29472 make_reg_eh_region_note_nothrow_nononlocal (insn);
29473 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29474 insn = emit_move_insn (lr, r0);
29475 add_reg_note (insn, REG_CFA_RESTORE, lr);
29476 RTX_FRAME_RELATED_P (insn) = 1;
29477 emit_insn (gen_split_stack_return ());
29479 emit_label (ok_label);
29480 LABEL_NUSES (ok_label) = 1;
29483 /* Return the internal arg pointer used for function incoming
29484 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29485 to copy it to a pseudo in order for it to be preserved over calls
29486 and suchlike. We'd really like to use a pseudo here for the
29487 internal arg pointer but data-flow analysis is not prepared to
29488 accept pseudos as live at the beginning of a function. */
29490 static rtx
29491 rs6000_internal_arg_pointer (void)
29493 if (flag_split_stack
29494 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29495 == NULL))
29498 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29500 rtx pat;
29502 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29503 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29505 /* Put the pseudo initialization right after the note at the
29506 beginning of the function. */
29507 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29508 gen_rtx_REG (Pmode, 12));
29509 push_topmost_sequence ();
29510 emit_insn_after (pat, get_insns ());
29511 pop_topmost_sequence ();
29513 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29514 FIRST_PARM_OFFSET (current_function_decl));
29516 return virtual_incoming_args_rtx;
29519 /* We may have to tell the dataflow pass that the split stack prologue
29520 is initializing a register. */
29522 static void
29523 rs6000_live_on_entry (bitmap regs)
29525 if (flag_split_stack)
29526 bitmap_set_bit (regs, 12);
29529 /* Emit -fsplit-stack dynamic stack allocation space check. */
29531 void
29532 rs6000_split_stack_space_check (rtx size, rtx label)
29534 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29535 rtx limit = gen_reg_rtx (Pmode);
29536 rtx requested = gen_reg_rtx (Pmode);
29537 rtx cmp = gen_reg_rtx (CCUNSmode);
29538 rtx jump;
29540 emit_insn (gen_load_split_stack_limit (limit));
29541 if (CONST_INT_P (size))
29542 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29543 else
29545 size = force_reg (Pmode, size);
29546 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29548 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29549 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29550 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29551 gen_rtx_LABEL_REF (VOIDmode, label),
29552 pc_rtx);
29553 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29554 JUMP_LABEL (jump) = label;
29557 /* A C compound statement that outputs the assembler code for a thunk
29558 function, used to implement C++ virtual function calls with
29559 multiple inheritance. The thunk acts as a wrapper around a virtual
29560 function, adjusting the implicit object parameter before handing
29561 control off to the real function.
29563 First, emit code to add the integer DELTA to the location that
29564 contains the incoming first argument. Assume that this argument
29565 contains a pointer, and is the one used to pass the `this' pointer
29566 in C++. This is the incoming argument *before* the function
29567 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29568 values of all other incoming arguments.
29570 After the addition, emit code to jump to FUNCTION, which is a
29571 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29572 not touch the return address. Hence returning from FUNCTION will
29573 return to whoever called the current `thunk'.
29575 The effect must be as if FUNCTION had been called directly with the
29576 adjusted first argument. This macro is responsible for emitting
29577 all of the code for a thunk function; output_function_prologue()
29578 and output_function_epilogue() are not invoked.
29580 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29581 been extracted from it.) It might possibly be useful on some
29582 targets, but probably not.
29584 If you do not define this macro, the target-independent code in the
29585 C++ frontend will generate a less efficient heavyweight thunk that
29586 calls FUNCTION instead of jumping to it. The generic approach does
29587 not support varargs. */
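/* Sketch (assumed, not from the source): for a thunk with DELTA == 16
   and no VCALL_OFFSET on 32-bit ELF, the emitted code amounts to just
       addi 3,3,16
       b target_function
   i.e. adjust the incoming `this' pointer in r3 and tail-jump, leaving
   LR untouched so the callee returns straight to the thunk's caller.  */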
29589 static void
29590 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29591 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29592 tree function)
29594 rtx this_rtx, funexp;
29595 rtx_insn *insn;
29597 reload_completed = 1;
29598 epilogue_completed = 1;
29600 /* Mark the end of the (empty) prologue. */
29601 emit_note (NOTE_INSN_PROLOGUE_END);
29603 /* Find the "this" pointer. If the function returns a structure,
29604 the structure return pointer is in r3. */
29605 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29606 this_rtx = gen_rtx_REG (Pmode, 4);
29607 else
29608 this_rtx = gen_rtx_REG (Pmode, 3);
29610 /* Apply the constant offset, if required. */
29611 if (delta)
29612 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29614 /* Apply the offset from the vtable, if required. */
29615 if (vcall_offset)
29617 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29618 rtx tmp = gen_rtx_REG (Pmode, 12);
29620 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29621 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29623 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29624 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29626 else
29628 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29630 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29632 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29635 /* Generate a tail call to the target function. */
29636 if (!TREE_USED (function))
29638 assemble_external (function);
29639 TREE_USED (function) = 1;
29641 funexp = XEXP (DECL_RTL (function), 0);
29642 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29644 #if TARGET_MACHO
29645 if (MACHOPIC_INDIRECT)
29646 funexp = machopic_indirect_call_target (funexp);
29647 #endif
29649 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29650 generate sibcall RTL explicitly. */
29651 insn = emit_call_insn (
29652 gen_rtx_PARALLEL (VOIDmode,
29653 gen_rtvec (3,
29654 gen_rtx_CALL (VOIDmode,
29655 funexp, const0_rtx),
29656 gen_rtx_USE (VOIDmode, const0_rtx),
29657 simple_return_rtx)));
29658 SIBLING_CALL_P (insn) = 1;
29659 emit_barrier ();
29661 /* Run just enough of rest_of_compilation to get the insns emitted.
29662 There's not really enough bulk here to make other passes such as
29663 instruction scheduling worthwhile. Note that use_thunk calls
29664 assemble_start_function and assemble_end_function. */
29665 insn = get_insns ();
29666 shorten_branches (insn);
29667 final_start_function (insn, file, 1);
29668 final (insn, file, 1);
29669 final_end_function ();
29671 reload_completed = 0;
29672 epilogue_completed = 0;
29675 /* A quick summary of the various types of 'constant-pool tables'
29676 under PowerPC:
29678 Target Flags Name One table per
29679 AIX (none) AIX TOC object file
29680 AIX -mfull-toc AIX TOC object file
29681 AIX -mminimal-toc AIX minimal TOC translation unit
29682 SVR4/EABI (none) SVR4 SDATA object file
29683 SVR4/EABI -fpic SVR4 pic object file
29684 SVR4/EABI -fPIC SVR4 PIC translation unit
29685 SVR4/EABI -mrelocatable EABI TOC function
29686 SVR4/EABI -maix AIX TOC object file
29687 SVR4/EABI -maix -mminimal-toc
29688 AIX minimal TOC translation unit
29690 Name Reg. Set by entries contains:
29691 made by addrs? fp? sum?
29693 AIX TOC 2 crt0 as Y option option
29694 AIX minimal TOC 30 prolog gcc Y Y option
29695 SVR4 SDATA 13 crt0 gcc N Y N
29696 SVR4 pic 30 prolog ld Y not yet N
29697 SVR4 PIC 30 prolog gcc Y option option
29698 EABI TOC 30 prolog gcc Y option option
29702 /* Hash functions for the hash table. */
29704 static unsigned
29705 rs6000_hash_constant (rtx k)
29707 enum rtx_code code = GET_CODE (k);
29708 machine_mode mode = GET_MODE (k);
29709 unsigned result = (code << 3) ^ mode;
29710 const char *format;
29711 int flen, fidx;
29713 format = GET_RTX_FORMAT (code);
29714 flen = strlen (format);
29715 fidx = 0;
29717 switch (code)
29719 case LABEL_REF:
29720 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29722 case CONST_WIDE_INT:
29724 int i;
29725 flen = CONST_WIDE_INT_NUNITS (k);
29726 for (i = 0; i < flen; i++)
29727 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29728 return result;
29731 case CONST_DOUBLE:
29732 if (mode != VOIDmode)
29733 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29734 flen = 2;
29735 break;
29737 case CODE_LABEL:
29738 fidx = 3;
29739 break;
29741 default:
29742 break;
29745 for (; fidx < flen; fidx++)
29746 switch (format[fidx])
29748 case 's':
29750 unsigned i, len;
29751 const char *str = XSTR (k, fidx);
29752 len = strlen (str);
29753 result = result * 613 + len;
29754 for (i = 0; i < len; i++)
29755 result = result * 613 + (unsigned) str[i];
29756 break;
29758 case 'u':
29759 case 'e':
29760 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29761 break;
29762 case 'i':
29763 case 'n':
29764 result = result * 613 + (unsigned) XINT (k, fidx);
29765 break;
29766 case 'w':
29767 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29768 result = result * 613 + (unsigned) XWINT (k, fidx);
29769 else
29771 size_t i;
29772 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29773 result = result * 613 + (unsigned) (XWINT (k, fidx)
29774 >> CHAR_BIT * i);
29776 break;
29777 case '0':
29778 break;
29779 default:
29780 gcc_unreachable ();
29783 return result;
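/* A minimal standalone sketch (not part of GCC) of the same
   multiply-and-accumulate scheme rs6000_hash_constant applies to the
   pieces of an RTX, shown here over a plain byte string with the same
   613 multiplier.  */

static unsigned
example_poly_hash_613 (const char *s)
{
  unsigned result = 0;
  while (*s)
    result = result * 613 + (unsigned char) *s++;
  return result;
}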
29786 hashval_t
29787 toc_hasher::hash (toc_hash_struct *thc)
29789 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29792 /* Compare H1 and H2 for equivalence. */
29794 bool
29795 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29797 rtx r1 = h1->key;
29798 rtx r2 = h2->key;
29800 if (h1->key_mode != h2->key_mode)
29801 return 0;
29803 return rtx_equal_p (r1, r2);
29806 /* These are the names given by the C++ front-end to vtables, and
29807 vtable-like objects. Ideally, this logic should not be here;
29808 instead, there should be some programmatic way of inquiring as
29809 to whether or not an object is a vtable. */
29811 #define VTABLE_NAME_P(NAME) \
29812 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29813 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29814 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29815 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29816 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
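/* For example, with Itanium C++ ABI mangling a class Node yields
   names such as _ZTV4Node (vtable), _ZTT4Node (VTT), _ZTI4Node
   (typeinfo) and _ZTC-prefixed construction vtables, all matched by
   the macro above; "_vt." is the old pre-3.0 GNU vtable prefix.  */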
29818 #ifdef NO_DOLLAR_IN_LABEL
29819 /* Return a GGC-allocated character string translating dollar signs in
29820 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29822 const char *
29823 rs6000_xcoff_strip_dollar (const char *name)
29825 char *strip, *p;
29826 const char *q;
29827 size_t len;
29829 q = (const char *) strchr (name, '$');
29831 if (q == 0 || q == name)
29832 return name;
29834 len = strlen (name);
29835 strip = XALLOCAVEC (char, len + 1);
29836 strcpy (strip, name);
29837 p = strip + (q - name);
29838 while (p)
29840 *p = '_';
29841 p = strchr (p + 1, '$');
29844 return ggc_alloc_string (strip, len);
29846 #endif
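/* Usage sketch (hypothetical inputs): rs6000_xcoff_strip_dollar maps
   "foo$stub" to "foo_stub" and "a$b$c" to "a_b_c"; a name with no '$',
   or one whose first character is '$', is returned unchanged.  */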
29848 void
29849 rs6000_output_symbol_ref (FILE *file, rtx x)
29851 const char *name = XSTR (x, 0);
29853 /* Currently C++ toc references to vtables can be emitted before it
29854 is decided whether the vtable is public or private. If this is
29855 the case, then the linker will eventually complain that there is
29856 a reference to an unknown section. Thus, for vtables only,
29857 we emit the TOC reference to reference the identifier and not the
29858 symbol. */
29859 if (VTABLE_NAME_P (name))
29861 RS6000_OUTPUT_BASENAME (file, name);
29863 else
29864 assemble_name (file, name);
29867 /* Output a TOC entry. We derive the entry name from what is being
29868 written. */
29870 void
29871 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29873 char buf[256];
29874 const char *name = buf;
29875 rtx base = x;
29876 HOST_WIDE_INT offset = 0;
29878 gcc_assert (!TARGET_NO_TOC);
29880 /* When the linker won't eliminate them, don't output duplicate
29881 TOC entries (this happens on AIX if there is any kind of TOC,
29882 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29883 CODE_LABELs. */
29884 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29886 struct toc_hash_struct *h;
29888 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29889 time because GGC is not initialized at that point. */
29890 if (toc_hash_table == NULL)
29891 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29893 h = ggc_alloc<toc_hash_struct> ();
29894 h->key = x;
29895 h->key_mode = mode;
29896 h->labelno = labelno;
29898 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29899 if (*found == NULL)
29900 *found = h;
29901 else /* This is indeed a duplicate.
29902 Set this label equal to that label. */
29904 fputs ("\t.set ", file);
29905 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29906 fprintf (file, "%d,", labelno);
29907 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29908 fprintf (file, "%d\n", ((*found)->labelno));
29910 #ifdef HAVE_AS_TLS
29911 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29912 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29913 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29915 fputs ("\t.set ", file);
29916 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29917 fprintf (file, "%d,", labelno);
29918 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29919 fprintf (file, "%d\n", ((*found)->labelno));
29921 #endif
29922 return;
29926 /* If we're going to put a double constant in the TOC, make sure it's
29927 aligned properly when strict alignment is on. */
29928 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29929 && STRICT_ALIGNMENT
29930 && GET_MODE_BITSIZE (mode) >= 64
29931 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
29932 ASM_OUTPUT_ALIGN (file, 3);
29935 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29937 /* Handle FP constants specially. Note that if we have a minimal
29938 TOC, things we put here aren't actually in the TOC, so we can allow
29939 FP constants. */
29940 if (GET_CODE (x) == CONST_DOUBLE &&
29941 (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29942 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29944 long k[4];
29946 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29947 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29948 else
29949 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29951 if (TARGET_64BIT)
29953 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29954 fputs (DOUBLE_INT_ASM_OP, file);
29955 else
29956 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29957 k[0] & 0xffffffff, k[1] & 0xffffffff,
29958 k[2] & 0xffffffff, k[3] & 0xffffffff);
29959 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29960 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29961 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29962 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29963 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29964 return;
29966 else
29968 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29969 fputs ("\t.long ", file);
29970 else
29971 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29972 k[0] & 0xffffffff, k[1] & 0xffffffff,
29973 k[2] & 0xffffffff, k[3] & 0xffffffff);
29974 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29975 k[0] & 0xffffffff, k[1] & 0xffffffff,
29976 k[2] & 0xffffffff, k[3] & 0xffffffff);
29977 return;
29980 else if (GET_CODE (x) == CONST_DOUBLE &&
29981 (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29983 long k[2];
29985 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29986 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29987 else
29988 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29990 if (TARGET_64BIT)
29992 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29993 fputs (DOUBLE_INT_ASM_OP, file);
29994 else
29995 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29996 k[0] & 0xffffffff, k[1] & 0xffffffff);
29997 fprintf (file, "0x%lx%08lx\n",
29998 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29999 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
30000 return;
30002 else
30004 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30005 fputs ("\t.long ", file);
30006 else
30007 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
30008 k[0] & 0xffffffff, k[1] & 0xffffffff);
30009 fprintf (file, "0x%lx,0x%lx\n",
30010 k[0] & 0xffffffff, k[1] & 0xffffffff);
30011 return;
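/* Worked example (assumed target, for illustration): for the DFmode
   constant 1.0 (image 0x3ff0000000000000), k[0]/k[1] hold the two
   32-bit halves in memory order, and the code above emits roughly
       .quad 0x3ff0000000000000               (64-bit ELF, via DOUBLE_INT_ASM_OP)
       .tc FD_3ff00000_0[TC],0x3ff00000,0x0   (32-bit AIX full TOC)
   Either way the same 64-bit image reaches the TOC.  */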
30014 else if (GET_CODE (x) == CONST_DOUBLE &&
30015 (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
30017 long l;
30019 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
30020 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
30021 else
30022 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
30024 if (TARGET_64BIT)
30026 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30027 fputs (DOUBLE_INT_ASM_OP, file);
30028 else
30029 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
30030 if (WORDS_BIG_ENDIAN)
30031 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
30032 else
30033 fprintf (file, "0x%lx\n", l & 0xffffffff);
30034 return;
30036 else
30038 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30039 fputs ("\t.long ", file);
30040 else
30041 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
30042 fprintf (file, "0x%lx\n", l & 0xffffffff);
30043 return;
30046 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
30048 unsigned HOST_WIDE_INT low;
30049 HOST_WIDE_INT high;
30051 low = INTVAL (x) & 0xffffffff;
30052 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
30054 /* TOC entries are always Pmode-sized, so when big-endian
30055 smaller integer constants in the TOC need to be padded.
30056 (This is still a win over putting the constants in
30057 a separate constant pool, because then we'd have
30058 to have both a TOC entry _and_ the actual constant.)
30060 For a 32-bit target, CONST_INT values are loaded and shifted
30061 entirely within `low' and can be stored in one TOC entry. */
30063 /* It would be easy to make this work, but it doesn't now. */
30064 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
30066 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
30068 low |= high << 32;
30069 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
30070 high = (HOST_WIDE_INT) low >> 32;
30071 low &= 0xffffffff;
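/* Worked example (assumed, for illustration): a 64-bit big-endian
   target with an SImode CONST_INT 0x12345678 starts with low ==
   0x12345678 and high == 0; the shift above turns this into low == 0,
   high == 0x12345678, so the emitted value is 0x1234567800000000 and
   the 32-bit constant sits in the most significant half of the
   Pmode-sized TOC slot, as a big-endian load expects.  */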
30074 if (TARGET_64BIT)
30076 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30077 fputs (DOUBLE_INT_ASM_OP, file);
30078 else
30079 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30080 (long) high & 0xffffffff, (long) low & 0xffffffff);
30081 fprintf (file, "0x%lx%08lx\n",
30082 (long) high & 0xffffffff, (long) low & 0xffffffff);
30083 return;
30085 else
30087 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
30089 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30090 fputs ("\t.long ", file);
30091 else
30092 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30093 (long) high & 0xffffffff, (long) low & 0xffffffff);
30094 fprintf (file, "0x%lx,0x%lx\n",
30095 (long) high & 0xffffffff, (long) low & 0xffffffff);
30097 else
30099 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30100 fputs ("\t.long ", file);
30101 else
30102 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
30103 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
30105 return;
30109 if (GET_CODE (x) == CONST)
30111 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
30112 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
30114 base = XEXP (XEXP (x, 0), 0);
30115 offset = INTVAL (XEXP (XEXP (x, 0), 1));
30118 switch (GET_CODE (base))
30120 case SYMBOL_REF:
30121 name = XSTR (base, 0);
30122 break;
30124 case LABEL_REF:
30125 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
30126 CODE_LABEL_NUMBER (XEXP (base, 0)));
30127 break;
30129 case CODE_LABEL:
30130 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
30131 break;
30133 default:
30134 gcc_unreachable ();
30137 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30138 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
30139 else
30141 fputs ("\t.tc ", file);
30142 RS6000_OUTPUT_BASENAME (file, name);
30144 if (offset < 0)
30145 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
30146 else if (offset)
30147 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
30149 /* Mark large TOC symbols on AIX with [TE] so they are mapped
30150 after other TOC symbols, reducing overflow of small TOC access
30151 to [TC] symbols. */
30152 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
30153 ? "[TE]," : "[TC],", file);
30156 /* Currently C++ toc references to vtables can be emitted before it
30157 is decided whether the vtable is public or private. If this is
30158 the case, then the linker will eventually complain that there is
30159 a TOC reference to an unknown section. Thus, for vtables only,
30160 we emit the TOC reference to reference the symbol and not the
30161 section. */
30162 if (VTABLE_NAME_P (name))
30164 RS6000_OUTPUT_BASENAME (file, name);
30165 if (offset < 0)
30166 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
30167 else if (offset > 0)
30168 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
30170 else
30171 output_addr_const (file, x);
30173 #if HAVE_AS_TLS
30174 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
30176 switch (SYMBOL_REF_TLS_MODEL (base))
30178 case 0:
30179 break;
30180 case TLS_MODEL_LOCAL_EXEC:
30181 fputs ("@le", file);
30182 break;
30183 case TLS_MODEL_INITIAL_EXEC:
30184 fputs ("@ie", file);
30185 break;
30186 /* Use global-dynamic for local-dynamic. */
30187 case TLS_MODEL_GLOBAL_DYNAMIC:
30188 case TLS_MODEL_LOCAL_DYNAMIC:
30189 putc ('\n', file);
30190 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
30191 fputs ("\t.tc .", file);
30192 RS6000_OUTPUT_BASENAME (file, name);
30193 fputs ("[TC],", file);
30194 output_addr_const (file, x);
30195 fputs ("@m", file);
30196 break;
30197 default:
30198 gcc_unreachable ();
30201 #endif
30203 putc ('\n', file);
30206 /* Output an assembler pseudo-op to write an ASCII string of N characters
30207 starting at P to FILE.
30209 On the RS/6000, we have to do this using the .byte operation and
30210 write out special characters outside the quoted string.
30211 Also, the assembler is broken; very long strings are truncated,
30212 so we must artificially break them up early. */
30214 void
30215 output_ascii (FILE *file, const char *p, int n)
30217 char c;
30218 int i, count_string;
30219 const char *for_string = "\t.byte \"";
30220 const char *for_decimal = "\t.byte ";
30221 const char *to_close = NULL;
30223 count_string = 0;
30224 for (i = 0; i < n; i++)
30226 c = *p++;
30227 if (c >= ' ' && c < 0177)
30229 if (for_string)
30230 fputs (for_string, file);
30231 putc (c, file);
30233 /* Write two quotes to get one. */
30234 if (c == '"')
30236 putc (c, file);
30237 ++count_string;
30240 for_string = NULL;
30241 for_decimal = "\"\n\t.byte ";
30242 to_close = "\"\n";
30243 ++count_string;
30245 if (count_string >= 512)
30247 fputs (to_close, file);
30249 for_string = "\t.byte \"";
30250 for_decimal = "\t.byte ";
30251 to_close = NULL;
30252 count_string = 0;
30255 else
30257 if (for_decimal)
30258 fputs (for_decimal, file);
30259 fprintf (file, "%d", c);
30261 for_string = "\n\t.byte \"";
30262 for_decimal = ", ";
30263 to_close = "\n";
30264 count_string = 0;
30268 /* Now close the string if we have written one. Then end the line. */
30269 if (to_close)
30270 fputs (to_close, file);
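/* Worked example (illustrative): output_ascii (file, "Hi\n", 3)
   produces
       .byte "Hi"
       .byte 10
   printable characters are batched inside one quoted .byte string,
   while the non-printable newline falls back to its decimal value.  */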
30273 /* Generate a unique section name for FILENAME for a section type
30274 represented by SECTION_DESC. Output goes into BUF.
30276 SECTION_DESC can be any string, as long as it is different for each
30277 possible section type.
30279 We name the section in the same manner as xlc. The name begins with an
30280 underscore followed by the filename (after stripping any leading directory
30281 names) with the last period replaced by the string SECTION_DESC. If
30282 FILENAME does not contain a period, SECTION_DESC is appended to the end of
30283 the name. */
30285 void
30286 rs6000_gen_section_name (char **buf, const char *filename,
30287 const char *section_desc)
30289 const char *q, *after_last_slash, *last_period = 0;
30290 char *p;
30291 int len;
30293 after_last_slash = filename;
30294 for (q = filename; *q; q++)
30296 if (*q == '/')
30297 after_last_slash = q + 1;
30298 else if (*q == '.')
30299 last_period = q;
30302 len = strlen (after_last_slash) + strlen (section_desc) + 2;
30303 *buf = (char *) xmalloc (len);
30305 p = *buf;
30306 *p++ = '_';
30308 for (q = after_last_slash; *q; q++)
30310 if (q == last_period)
30312 strcpy (p, section_desc);
30313 p += strlen (section_desc);
30314 break;
30317 else if (ISALNUM (*q))
30318 *p++ = *q;
30321 if (last_period == 0)
30322 strcpy (p, section_desc);
30323 else
30324 *p = '\0';
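/* Worked example (hypothetical inputs): for FILENAME "/tmp/my-file.c"
   and SECTION_DESC "bss" the result is "_myfilebss": the directory
   part and the non-alphanumeric '-' are dropped, and the final ".c"
   is replaced by the descriptor.  If FILENAME has no period, the
   descriptor is simply appended.  */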
30327 /* Emit profile function. */
30329 void
30330 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30332 /* Non-standard profiling for kernels, which just saves LR then calls
30333 _mcount without worrying about arg saves. The idea is to change
30334 the function prologue as little as possible as it isn't easy to
30335 account for arg save/restore code added just for _mcount. */
30336 if (TARGET_PROFILE_KERNEL)
30337 return;
30339 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30341 #ifndef NO_PROFILE_COUNTERS
30342 # define NO_PROFILE_COUNTERS 0
30343 #endif
30344 if (NO_PROFILE_COUNTERS)
30345 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30346 LCT_NORMAL, VOIDmode);
30347 else
30349 char buf[30];
30350 const char *label_name;
30351 rtx fun;
30353 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30354 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30355 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30357 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30358 LCT_NORMAL, VOIDmode, fun, Pmode);
30361 else if (DEFAULT_ABI == ABI_DARWIN)
30363 const char *mcount_name = RS6000_MCOUNT;
30364 int caller_addr_regno = LR_REGNO;
30366 /* Be conservative and always set this, at least for now. */
30367 crtl->uses_pic_offset_table = 1;
30369 #if TARGET_MACHO
30370 /* For PIC code, set up a stub and collect the caller's address
30371 from r0, which is where the prologue puts it. */
30372 if (MACHOPIC_INDIRECT
30373 && crtl->uses_pic_offset_table)
30374 caller_addr_regno = 0;
30375 #endif
30376 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30377 LCT_NORMAL, VOIDmode,
30378 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30382 /* Write function profiler code. */
30384 void
30385 output_function_profiler (FILE *file, int labelno)
30387 char buf[100];
30389 switch (DEFAULT_ABI)
30391 default:
30392 gcc_unreachable ();
30394 case ABI_V4:
30395 if (!TARGET_32BIT)
30397 warning (0, "no profiling of 64-bit code for this ABI");
30398 return;
30400 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30401 fprintf (file, "\tmflr %s\n", reg_names[0]);
30402 if (NO_PROFILE_COUNTERS)
30404 asm_fprintf (file, "\tstw %s,4(%s)\n",
30405 reg_names[0], reg_names[1]);
30407 else if (TARGET_SECURE_PLT && flag_pic)
30409 if (TARGET_LINK_STACK)
30411 char name[32];
30412 get_ppc476_thunk_name (name);
30413 asm_fprintf (file, "\tbl %s\n", name);
30415 else
30416 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30417 asm_fprintf (file, "\tstw %s,4(%s)\n",
30418 reg_names[0], reg_names[1]);
30419 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30420 asm_fprintf (file, "\taddis %s,%s,",
30421 reg_names[12], reg_names[12]);
30422 assemble_name (file, buf);
30423 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30424 assemble_name (file, buf);
30425 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30427 else if (flag_pic == 1)
30429 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30430 asm_fprintf (file, "\tstw %s,4(%s)\n",
30431 reg_names[0], reg_names[1]);
30432 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30433 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30434 assemble_name (file, buf);
30435 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30437 else if (flag_pic > 1)
30439 asm_fprintf (file, "\tstw %s,4(%s)\n",
30440 reg_names[0], reg_names[1]);
30441 /* Now, we need to get the address of the label. */
30442 if (TARGET_LINK_STACK)
30444 char name[32];
30445 get_ppc476_thunk_name (name);
30446 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30447 assemble_name (file, buf);
30448 fputs ("-.\n1:", file);
30449 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30450 asm_fprintf (file, "\taddi %s,%s,4\n",
30451 reg_names[11], reg_names[11]);
30453 else
30455 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30456 assemble_name (file, buf);
30457 fputs ("-.\n1:", file);
30458 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30460 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30461 reg_names[0], reg_names[11]);
30462 asm_fprintf (file, "\tadd %s,%s,%s\n",
30463 reg_names[0], reg_names[0], reg_names[11]);
30465 else
30467 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30468 assemble_name (file, buf);
30469 fputs ("@ha\n", file);
30470 asm_fprintf (file, "\tstw %s,4(%s)\n",
30471 reg_names[0], reg_names[1]);
30472 asm_fprintf (file, "\tla %s,", reg_names[0]);
30473 assemble_name (file, buf);
30474 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30477 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30478 fprintf (file, "\tbl %s%s\n",
30479 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30480 break;
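/* Sketch (assumed label spelling, for illustration): the non-PIC
   ABI_V4 case above amounts to
       mflr 0
       lis 12,.LP3@ha
       stw 0,4(1)
       la 0,.LP3@l(12)
       bl _mcount
   i.e. save LR in the caller's frame, form the address of the profile
   counter label in r0, then call _mcount.  */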
30482 case ABI_AIX:
30483 case ABI_ELFv2:
30484 case ABI_DARWIN:
30485 /* Don't do anything, done in output_profile_hook (). */
30486 break;
30492 /* The following variable value is the last issued insn. */
30494 static rtx_insn *last_scheduled_insn;
30496 /* The following variable helps to balance issuing of load and
30497 store instructions. */
30499 static int load_store_pendulum;
30501 /* The following variable helps pair divide insns during scheduling. */
30502 static int divide_cnt;
30503 /* The following variable helps pair and alternate vector and vector load
30504 insns during scheduling. */
30505 static int vec_pairing;
30508 /* Power4 load update and store update instructions are cracked into a
30509 load or store and an integer insn which are executed in the same cycle.
30510 Branches have their own dispatch slot which does not count against the
30511 GCC issue rate, but it changes the program flow so there are no other
30512 instructions to issue in this cycle. */
30514 static int
30515 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30517 last_scheduled_insn = insn;
30518 if (GET_CODE (PATTERN (insn)) == USE
30519 || GET_CODE (PATTERN (insn)) == CLOBBER)
30521 cached_can_issue_more = more;
30522 return cached_can_issue_more;
30525 if (insn_terminates_group_p (insn, current_group))
30527 cached_can_issue_more = 0;
30528 return cached_can_issue_more;
30531 /* If the insn has no reservation but we still get here, leave the issue count unchanged. */
30532 if (recog_memoized (insn) < 0)
30533 return more;
30535 if (rs6000_sched_groups)
30537 if (is_microcoded_insn (insn))
30538 cached_can_issue_more = 0;
30539 else if (is_cracked_insn (insn))
30540 cached_can_issue_more = more > 2 ? more - 2 : 0;
30541 else
30542 cached_can_issue_more = more - 1;
30544 return cached_can_issue_more;
30547 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
30548 return 0;
30550 cached_can_issue_more = more - 1;
30551 return cached_can_issue_more;
30554 static int
30555 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30557 int r = rs6000_variable_issue_1 (insn, more);
30558 if (verbose)
30559 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30560 return r;
30563 /* Adjust the cost of a scheduling dependency. Return the new cost of
30564 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30566 static int
30567 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30568 unsigned int)
30570 enum attr_type attr_type;
30572 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30573 return cost;
30575 switch (dep_type)
30577 case REG_DEP_TRUE:
30579 /* Data dependency; DEP_INSN writes a register that INSN reads
30580 some cycles later. */
30582 /* Separate a load from a narrower, dependent store. */
30583 if ((rs6000_sched_groups || rs6000_cpu_attr == CPU_POWER9)
30584 && GET_CODE (PATTERN (insn)) == SET
30585 && GET_CODE (PATTERN (dep_insn)) == SET
30586 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30587 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30588 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30589 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30590 return cost + 14;
30592 attr_type = get_attr_type (insn);
30594 switch (attr_type)
30596 case TYPE_JMPREG:
30597 /* Tell the first scheduling pass about the latency between
30598 a mtctr and bctr (and mtlr and br/blr). The first
30599 scheduling pass will not know about this latency since
30600 the mtctr instruction, which has the latency associated
30601 to it, will be generated by reload. */
30602 return 4;
30603 case TYPE_BRANCH:
30604 /* Leave some extra cycles between a compare and its
30605 dependent branch, to inhibit expensive mispredicts. */
30606 if ((rs6000_cpu_attr == CPU_PPC603
30607 || rs6000_cpu_attr == CPU_PPC604
30608 || rs6000_cpu_attr == CPU_PPC604E
30609 || rs6000_cpu_attr == CPU_PPC620
30610 || rs6000_cpu_attr == CPU_PPC630
30611 || rs6000_cpu_attr == CPU_PPC750
30612 || rs6000_cpu_attr == CPU_PPC7400
30613 || rs6000_cpu_attr == CPU_PPC7450
30614 || rs6000_cpu_attr == CPU_PPCE5500
30615 || rs6000_cpu_attr == CPU_PPCE6500
30616 || rs6000_cpu_attr == CPU_POWER4
30617 || rs6000_cpu_attr == CPU_POWER5
30618 || rs6000_cpu_attr == CPU_POWER7
30619 || rs6000_cpu_attr == CPU_POWER8
30620 || rs6000_cpu_attr == CPU_POWER9
30621 || rs6000_cpu_attr == CPU_CELL)
30622 && recog_memoized (dep_insn)
30623 && (INSN_CODE (dep_insn) >= 0))
30625 switch (get_attr_type (dep_insn))
30627 case TYPE_CMP:
30628 case TYPE_FPCOMPARE:
30629 case TYPE_CR_LOGICAL:
30630 case TYPE_DELAYED_CR:
30631 return cost + 2;
30632 case TYPE_EXTS:
30633 case TYPE_MUL:
30634 if (get_attr_dot (dep_insn) == DOT_YES)
30635 return cost + 2;
30636 else
30637 break;
30638 case TYPE_SHIFT:
30639 if (get_attr_dot (dep_insn) == DOT_YES
30640 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30641 return cost + 2;
30642 else
30643 break;
30644 default:
30645 break;
30647 break;
30649 case TYPE_STORE:
30650 case TYPE_FPSTORE:
30651 if ((rs6000_cpu == PROCESSOR_POWER6)
30652 && recog_memoized (dep_insn)
30653 && (INSN_CODE (dep_insn) >= 0))
30656 if (GET_CODE (PATTERN (insn)) != SET)
30657 /* If this happens, we have to extend this to schedule
30658 optimally. Return default for now. */
30659 return cost;
30661 /* Adjust the cost for the case where the value written
30662 by a fixed point operation is used as the address
30663 gen value on a store. */
30664 switch (get_attr_type (dep_insn))
30666 case TYPE_LOAD:
30667 case TYPE_CNTLZ:
30669 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30670 return get_attr_sign_extend (dep_insn)
30671 == SIGN_EXTEND_YES ? 6 : 4;
30672 break;
30674 case TYPE_SHIFT:
30676 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30677 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30678 6 : 3;
30679 break;
30681 case TYPE_INTEGER:
30682 case TYPE_ADD:
30683 case TYPE_LOGICAL:
30684 case TYPE_EXTS:
30685 case TYPE_INSERT:
30687 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30688 return 3;
30689 break;
30691 case TYPE_STORE:
30692 case TYPE_FPLOAD:
30693 case TYPE_FPSTORE:
30695 if (get_attr_update (dep_insn) == UPDATE_YES
30696 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30697 return 3;
30698 break;
30700 case TYPE_MUL:
30702 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30703 return 17;
30704 break;
30706 case TYPE_DIV:
30708 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30709 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30710 break;
30712 default:
30713 break;
30716 break;
30718 case TYPE_LOAD:
30719 if ((rs6000_cpu == PROCESSOR_POWER6)
30720 && recog_memoized (dep_insn)
30721 && (INSN_CODE (dep_insn) >= 0))
30724 /* Adjust the cost for the case where the value written
30725 by a fixed point instruction is used within the address
30726 gen portion of a subsequent load(u)(x) */
30727 switch (get_attr_type (dep_insn))
30729 case TYPE_LOAD:
30730 case TYPE_CNTLZ:
30732 if (set_to_load_agen (dep_insn, insn))
30733 return get_attr_sign_extend (dep_insn)
30734 == SIGN_EXTEND_YES ? 6 : 4;
30735 break;
30737 case TYPE_SHIFT:
30739 if (set_to_load_agen (dep_insn, insn))
30740 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30741 6 : 3;
30742 break;
30744 case TYPE_INTEGER:
30745 case TYPE_ADD:
30746 case TYPE_LOGICAL:
30747 case TYPE_EXTS:
30748 case TYPE_INSERT:
30750 if (set_to_load_agen (dep_insn, insn))
30751 return 3;
30752 break;
30754 case TYPE_STORE:
30755 case TYPE_FPLOAD:
30756 case TYPE_FPSTORE:
30758 if (get_attr_update (dep_insn) == UPDATE_YES
30759 && set_to_load_agen (dep_insn, insn))
30760 return 3;
30761 break;
30763 case TYPE_MUL:
30765 if (set_to_load_agen (dep_insn, insn))
30766 return 17;
30767 break;
30769 case TYPE_DIV:
30771 if (set_to_load_agen (dep_insn, insn))
30772 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30773 break;
30775 default:
30776 break;
30779 break;
30781 case TYPE_FPLOAD:
30782 if ((rs6000_cpu == PROCESSOR_POWER6)
30783 && get_attr_update (insn) == UPDATE_NO
30784 && recog_memoized (dep_insn)
30785 && (INSN_CODE (dep_insn) >= 0)
30786 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30787 return 2;
30789 default:
30790 break;
30793 /* Fall out to return default cost. */
30795 break;
30797 case REG_DEP_OUTPUT:
30798 /* Output dependency; DEP_INSN writes a register that INSN writes some
30799 cycles later. */
30800 if ((rs6000_cpu == PROCESSOR_POWER6)
30801 && recog_memoized (dep_insn)
30802 && (INSN_CODE (dep_insn) >= 0))
30804 attr_type = get_attr_type (insn);
30806 switch (attr_type)
30808 case TYPE_FP:
30809 case TYPE_FPSIMPLE:
30810 if (get_attr_type (dep_insn) == TYPE_FP
30811 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30812 return 1;
30813 break;
30814 case TYPE_FPLOAD:
30815 if (get_attr_update (insn) == UPDATE_NO
30816 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30817 return 2;
30818 break;
30819 default:
30820 break;
30823 /* Fall through, no cost for output dependency. */
30824 /* FALLTHRU */
30826 case REG_DEP_ANTI:
30827 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30828 cycles later. */
30829 return 0;
30831 default:
30832 gcc_unreachable ();
30835 return cost;
30838 /* Debug version of rs6000_adjust_cost. */
30840 static int
30841 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30842 int cost, unsigned int dw)
30844 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30846 if (ret != cost)
30848 const char *dep;
30850 switch (dep_type)
30852 default: dep = "unknown dependency"; break;
30853 case REG_DEP_TRUE: dep = "data dependency"; break;
30854 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30855 case REG_DEP_ANTI: dep = "anti dependency"; break;
30858 fprintf (stderr,
30859 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30860 "%s, insn:\n", ret, cost, dep);
30862 debug_rtx (insn);
30865 return ret;
30868 /* Return true if INSN is microcoded.
30869 Return false otherwise. */
30871 static bool
30872 is_microcoded_insn (rtx_insn *insn)
30874 if (!insn || !NONDEBUG_INSN_P (insn)
30875 || GET_CODE (PATTERN (insn)) == USE
30876 || GET_CODE (PATTERN (insn)) == CLOBBER)
30877 return false;
30879 if (rs6000_cpu_attr == CPU_CELL)
30880 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30882 if (rs6000_sched_groups
30883 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30885 enum attr_type type = get_attr_type (insn);
30886 if ((type == TYPE_LOAD
30887 && get_attr_update (insn) == UPDATE_YES
30888 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30889 || ((type == TYPE_LOAD || type == TYPE_STORE)
30890 && get_attr_update (insn) == UPDATE_YES
30891 && get_attr_indexed (insn) == INDEXED_YES)
30892 || type == TYPE_MFCR)
30893 return true;
30896 return false;
30899 /* The function returns true if INSN is cracked into 2 instructions
30900 by the processor (and therefore occupies 2 issue slots). */
30902 static bool
30903 is_cracked_insn (rtx_insn *insn)
30905 if (!insn || !NONDEBUG_INSN_P (insn)
30906 || GET_CODE (PATTERN (insn)) == USE
30907 || GET_CODE (PATTERN (insn)) == CLOBBER)
30908 return false;
30910 if (rs6000_sched_groups
30911 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30913 enum attr_type type = get_attr_type (insn);
30914 if ((type == TYPE_LOAD
30915 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30916 && get_attr_update (insn) == UPDATE_NO)
30917 || (type == TYPE_LOAD
30918 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30919 && get_attr_update (insn) == UPDATE_YES
30920 && get_attr_indexed (insn) == INDEXED_NO)
30921 || (type == TYPE_STORE
30922 && get_attr_update (insn) == UPDATE_YES
30923 && get_attr_indexed (insn) == INDEXED_NO)
30924 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30925 && get_attr_update (insn) == UPDATE_YES)
30926 || type == TYPE_DELAYED_CR
30927 || (type == TYPE_EXTS
30928 && get_attr_dot (insn) == DOT_YES)
30929 || (type == TYPE_SHIFT
30930 && get_attr_dot (insn) == DOT_YES
30931 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30932 || (type == TYPE_MUL
30933 && get_attr_dot (insn) == DOT_YES)
30934 || type == TYPE_DIV
30935 || (type == TYPE_INSERT
30936 && get_attr_size (insn) == SIZE_32))
30937 return true;
30940 return false;
30943 /* The function returns true if INSN can be issued only from
30944 the branch slot. */
30946 static bool
30947 is_branch_slot_insn (rtx_insn *insn)
30949 if (!insn || !NONDEBUG_INSN_P (insn)
30950 || GET_CODE (PATTERN (insn)) == USE
30951 || GET_CODE (PATTERN (insn)) == CLOBBER)
30952 return false;
30954 if (rs6000_sched_groups)
30956 enum attr_type type = get_attr_type (insn);
30957 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30958 return true;
30959 return false;
30962 return false;
30965 /* Return true if OUT_INSN sets a value that is used in the
30966 address generation computation of IN_INSN. */
30967 static bool
30968 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30970 rtx out_set, in_set;
30972 /* For performance reasons, only handle the simple case where
30973 both insns are a single_set. */
30974 out_set = single_set (out_insn);
30975 if (out_set)
30977 in_set = single_set (in_insn);
30978 if (in_set)
30979 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30982 return false;
30985 /* Try to determine base/offset/size parts of the given MEM.
30986 Return true if successful, false if any of the values couldn't
30987 be determined.
30989 This function only looks for REG or REG+CONST address forms.
30990 REG+REG address form will return false. */
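/* Example (illustrative only): a MEM of size 4 whose address is
   (plus (plus (reg r9) (const_int 32)) (const_int 8)) yields
   base = r9, offset = 40, size = 4; an indexed REG+REG address
   such as (plus (reg r9) (reg r10)) makes this return false.  */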
30992 static bool
30993 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30994 HOST_WIDE_INT *size)
30996 rtx addr_rtx;
30997 if (MEM_SIZE_KNOWN_P (mem))
30998 *size = MEM_SIZE (mem);
30999 else
31000 return false;
31002 addr_rtx = XEXP (mem, 0);
31003 if (GET_CODE (addr_rtx) == PRE_MODIFY)
31004 addr_rtx = XEXP (addr_rtx, 1);
31006 *offset = 0;
31007 while (GET_CODE (addr_rtx) == PLUS
31008 && CONST_INT_P (XEXP (addr_rtx, 1)))
31010 *offset += INTVAL (XEXP (addr_rtx, 1));
31011 addr_rtx = XEXP (addr_rtx, 0);
31013 if (!REG_P (addr_rtx))
31014 return false;
31016 *base = addr_rtx;
31017 return true;
31020 /* Return true if the target storage location of MEM1 is
31021 adjacent to the target storage location of MEM2. */
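/* Example (illustrative only): two 8-byte accesses at 8(r1) and
   16(r1) satisfy off1 + size1 == off2 and are adjacent; 8(r1) and
   24(r1) are not.  */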
31024 static bool
31025 adjacent_mem_locations (rtx mem1, rtx mem2)
31027 rtx reg1, reg2;
31028 HOST_WIDE_INT off1, size1, off2, size2;
31030 if (get_memref_parts (mem1, &reg1, &off1, &size1)
31031 && get_memref_parts (mem2, &reg2, &off2, &size2))
31032 return ((REGNO (reg1) == REGNO (reg2))
31033 && ((off1 + size1 == off2)
31034 || (off2 + size2 == off1)));
31036 return false;
31039 /* This function returns true if it can be determined that the two MEM
31040 locations overlap by at least 1 byte based on base reg/offset/size. */
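/* Example (illustrative only): accesses at offset 8 with size 8 and
   offset 12 with size 4 from the same base register overlap because
   off1 <= off2 && off1 + size1 > off2.  Distinct base registers make
   this return false, i.e. "no overlap proven", which is how the
   costly-dependence logic below uses the result.  */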
31042 static bool
31043 mem_locations_overlap (rtx mem1, rtx mem2)
31045 rtx reg1, reg2;
31046 HOST_WIDE_INT off1, size1, off2, size2;
31048 if (get_memref_parts (mem1, &reg1, &off1, &size1)
31049 && get_memref_parts (mem2, &reg2, &off2, &size2))
31050 return ((REGNO (reg1) == REGNO (reg2))
31051 && (((off1 <= off2) && (off1 + size1 > off2))
31052 || ((off2 <= off1) && (off2 + size2 > off1))));
31054 return false;
31057 /* Update the integer scheduling priority INSN_PRIORITY (INSN).
31058 Increase the priority to execute INSN earlier, reduce the
31059 priority to execute INSN later. */
31063 static int
31064 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
31066 rtx load_mem, str_mem;
31067 /* On machines (like the 750) which have asymmetric integer units,
31068 where one integer unit can do multiply and divides and the other
31069 can't, reduce the priority of multiply/divide so it is scheduled
31070 after other integer operations. */
31072 #if 0
31073 if (! INSN_P (insn))
31074 return priority;
31076 if (GET_CODE (PATTERN (insn)) == USE)
31077 return priority;
31079 switch (rs6000_cpu_attr) {
31080 case CPU_PPC750:
31081 switch (get_attr_type (insn))
31083 default:
31084 break;
31086 case TYPE_MUL:
31087 case TYPE_DIV:
31088 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
31089 priority, priority);
31090 if (priority >= 0 && priority < 0x01000000)
31091 priority >>= 3;
31092 break;
31095 #endif
31097 if (insn_must_be_first_in_group (insn)
31098 && reload_completed
31099 && current_sched_info->sched_max_insns_priority
31100 && rs6000_sched_restricted_insns_priority)
31103 /* Prioritize insns that can be dispatched only in the first
31104 dispatch slot. */
31105 if (rs6000_sched_restricted_insns_priority == 1)
31106 /* Attach highest priority to insn. This means that in
31107 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
31108 precede 'priority' (critical path) considerations. */
31109 return current_sched_info->sched_max_insns_priority;
31110 else if (rs6000_sched_restricted_insns_priority == 2)
31111 /* Increase priority of insn by a minimal amount. This means that in
31112 haifa-sched.c:ready_sort(), only 'priority' (critical path)
31113 considerations precede dispatch-slot restriction considerations. */
31114 return (priority + 1);
31117 if (rs6000_cpu == PROCESSOR_POWER6
31118 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
31119 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
31120 /* Attach highest priority to insn if the scheduler has just issued two
31121 stores and this instruction is a load, or two loads and this instruction
31122 is a store. Power6 wants loads and stores scheduled alternately
31123 when possible */
31124 return current_sched_info->sched_max_insns_priority;
31126 return priority;
31129 /* Return true if the instruction is nonpipelined on the Cell. */
31130 static bool
31131 is_nonpipeline_insn (rtx_insn *insn)
31133 enum attr_type type;
31134 if (!insn || !NONDEBUG_INSN_P (insn)
31135 || GET_CODE (PATTERN (insn)) == USE
31136 || GET_CODE (PATTERN (insn)) == CLOBBER)
31137 return false;
31139 type = get_attr_type (insn);
31140 if (type == TYPE_MUL
31141 || type == TYPE_DIV
31142 || type == TYPE_SDIV
31143 || type == TYPE_DDIV
31144 || type == TYPE_SSQRT
31145 || type == TYPE_DSQRT
31146 || type == TYPE_MFCR
31147 || type == TYPE_MFCRF
31148 || type == TYPE_MFJMPR)
31150 return true;
31152 return false;
31156 /* Return how many instructions the machine can issue per cycle. */
31158 static int
31159 rs6000_issue_rate (void)
31161 /* Unless scheduling for register pressure, use an issue rate of 1
31162 for the first scheduling pass to decrease degradation. */
31163 if (!reload_completed && !flag_sched_pressure)
31164 return 1;
31166 switch (rs6000_cpu_attr) {
31167 case CPU_RS64A:
31168 case CPU_PPC601: /* ? */
31169 case CPU_PPC7450:
31170 return 3;
31171 case CPU_PPC440:
31172 case CPU_PPC603:
31173 case CPU_PPC750:
31174 case CPU_PPC7400:
31175 case CPU_PPC8540:
31176 case CPU_PPC8548:
31177 case CPU_CELL:
31178 case CPU_PPCE300C2:
31179 case CPU_PPCE300C3:
31180 case CPU_PPCE500MC:
31181 case CPU_PPCE500MC64:
31182 case CPU_PPCE5500:
31183 case CPU_PPCE6500:
31184 case CPU_TITAN:
31185 return 2;
31186 case CPU_PPC476:
31187 case CPU_PPC604:
31188 case CPU_PPC604E:
31189 case CPU_PPC620:
31190 case CPU_PPC630:
31191 return 4;
31192 case CPU_POWER4:
31193 case CPU_POWER5:
31194 case CPU_POWER6:
31195 case CPU_POWER7:
31196 return 5;
31197 case CPU_POWER8:
31198 return 7;
31199 case CPU_POWER9:
31200 return 6;
31201 default:
31202 return 1;
31206 /* Return how many instructions to look ahead for better insn
31207 scheduling. */
31209 static int
31210 rs6000_use_sched_lookahead (void)
31212 switch (rs6000_cpu_attr)
31214 case CPU_PPC8540:
31215 case CPU_PPC8548:
31216 return 4;
31218 case CPU_CELL:
31219 return (reload_completed ? 8 : 0);
31221 default:
31222 return 0;
31226 /* We are choosing an insn from the ready queue. Return zero if INSN can be
31227 chosen. */
31228 static int
31229 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
31231 if (ready_index == 0)
31232 return 0;
31234 if (rs6000_cpu_attr != CPU_CELL)
31235 return 0;
31237 gcc_assert (insn != NULL_RTX && INSN_P (insn));
31239 if (!reload_completed
31240 || is_nonpipeline_insn (insn)
31241 || is_microcoded_insn (insn))
31242 return 1;
31244 return 0;
31247 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
31248 and return true. */
31250 static bool
31251 find_mem_ref (rtx pat, rtx *mem_ref)
31253 const char * fmt;
31254 int i, j;
31256 /* stack_tie does not produce any real memory traffic. */
31257 if (tie_operand (pat, VOIDmode))
31258 return false;
31260 if (GET_CODE (pat) == MEM)
31262 *mem_ref = pat;
31263 return true;
31266 /* Recursively process the pattern. */
31267 fmt = GET_RTX_FORMAT (GET_CODE (pat));
31269 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
31271 if (fmt[i] == 'e')
31273 if (find_mem_ref (XEXP (pat, i), mem_ref))
31274 return true;
31276 else if (fmt[i] == 'E')
31277 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
31279 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
31280 return true;
31284 return false;
31287 /* Determine if PAT is a PATTERN of a load insn. */
31289 static bool
31290 is_load_insn1 (rtx pat, rtx *load_mem)
31292 if (!pat)
31293 return false;
31295 if (GET_CODE (pat) == SET)
31296 return find_mem_ref (SET_SRC (pat), load_mem);
31298 if (GET_CODE (pat) == PARALLEL)
31300 int i;
31302 for (i = 0; i < XVECLEN (pat, 0); i++)
31303 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
31304 return true;
31307 return false;
31310 /* Determine if INSN loads from memory. */
31312 static bool
31313 is_load_insn (rtx insn, rtx *load_mem)
31315 if (!insn || !INSN_P (insn))
31316 return false;
31318 if (CALL_P (insn))
31319 return false;
31321 return is_load_insn1 (PATTERN (insn), load_mem);
31324 /* Determine if PAT is a PATTERN of a store insn. */
31326 static bool
31327 is_store_insn1 (rtx pat, rtx *str_mem)
31329 if (!pat)
31330 return false;
31332 if (GET_CODE (pat) == SET)
31333 return find_mem_ref (SET_DEST (pat), str_mem);
31335 if (GET_CODE (pat) == PARALLEL)
31337 int i;
31339 for (i = 0; i < XVECLEN (pat, 0); i++)
31340 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31341 return true;
31344 return false;
31347 /* Determine if INSN stores to memory. */
31349 static bool
31350 is_store_insn (rtx insn, rtx *str_mem)
31352 if (!insn || !INSN_P (insn))
31353 return false;
31355 return is_store_insn1 (PATTERN (insn), str_mem);
31358 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31360 static bool
31361 is_power9_pairable_vec_type (enum attr_type type)
31363 switch (type)
31365 case TYPE_VECSIMPLE:
31366 case TYPE_VECCOMPLEX:
31367 case TYPE_VECDIV:
31368 case TYPE_VECCMP:
31369 case TYPE_VECPERM:
31370 case TYPE_VECFLOAT:
31371 case TYPE_VECFDIV:
31372 case TYPE_VECDOUBLE:
31373 return true;
31374 default:
31375 break;
31377 return false;
31380 /* Returns whether the dependence between INSN and NEXT is considered
31381 costly by the given target. */
31383 static bool
31384 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31386 rtx insn;
31387 rtx next;
31388 rtx load_mem, str_mem;
31390 /* If the flag is not enabled - no dependence is considered costly;
31391 allow all dependent insns in the same group.
31392 This is the most aggressive option. */
31393 if (rs6000_sched_costly_dep == no_dep_costly)
31394 return false;
31396 /* If the flag is set to 1 - a dependence is always considered costly;
31397 do not allow dependent instructions in the same group.
31398 This is the most conservative option. */
31399 if (rs6000_sched_costly_dep == all_deps_costly)
31400 return true;
31402 insn = DEP_PRO (dep);
31403 next = DEP_CON (dep);
31405 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31406 && is_load_insn (next, &load_mem)
31407 && is_store_insn (insn, &str_mem))
31408 /* Prevent load after store in the same group. */
31409 return true;
31411 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31412 && is_load_insn (next, &load_mem)
31413 && is_store_insn (insn, &str_mem)
31414 && DEP_TYPE (dep) == REG_DEP_TRUE
31415 && mem_locations_overlap (str_mem, load_mem))
31416 /* Prevent load after store in the same group if it is a true
31417 dependence. */
31418 return true;
31420 /* The flag is set to X; dependences with latency >= X are considered costly,
31421 and will not be scheduled in the same group. */
31422 if (rs6000_sched_costly_dep <= max_dep_latency
31423 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31424 return true;
31426 return false;
31429 /* Return the next insn after INSN that is found before TAIL is reached,
31430 skipping any "non-active" insns - insns that will not actually occupy
31431 an issue slot. Return NULL_RTX if such an insn is not found. */
31433 static rtx_insn *
31434 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31436 if (insn == NULL_RTX || insn == tail)
31437 return NULL;
31439 while (1)
31441 insn = NEXT_INSN (insn);
31442 if (insn == NULL_RTX || insn == tail)
31443 return NULL;
31445 if (CALL_P (insn)
31446 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31447 || (NONJUMP_INSN_P (insn)
31448 && GET_CODE (PATTERN (insn)) != USE
31449 && GET_CODE (PATTERN (insn)) != CLOBBER
31450 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31451 break;
31453 return insn;
31456 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31458 static int
31459 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31461 int pos;
31462 int i;
31463 rtx_insn *tmp;
31464 enum attr_type type, type2;
31466 type = get_attr_type (last_scheduled_insn);
31468 /* Try to issue fixed point divides back-to-back in pairs so they will be
31469 routed to separate execution units and execute in parallel. */
31470 if (type == TYPE_DIV && divide_cnt == 0)
31472 /* First divide has been scheduled. */
31473 divide_cnt = 1;
31475 /* Scan the ready list looking for another divide, if found move it
31476 to the end of the list so it is chosen next. */
31477 pos = lastpos;
31478 while (pos >= 0)
31480 if (recog_memoized (ready[pos]) >= 0
31481 && get_attr_type (ready[pos]) == TYPE_DIV)
31483 tmp = ready[pos];
31484 for (i = pos; i < lastpos; i++)
31485 ready[i] = ready[i + 1];
31486 ready[lastpos] = tmp;
31487 break;
31489 pos--;
31492 else
31494 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31495 divide_cnt = 0;
31497 /* The best dispatch throughput for vector and vector load insns can be
31498 achieved by interleaving a vector and vector load such that they'll
31499 dispatch to the same superslice. If this pairing cannot be achieved
31500 then it is best to pair vector insns together and vector load insns
31501 together.
31503 To aid in this pairing, vec_pairing maintains the current state with
31504 the following values:
31506 0 : Initial state, no vecload/vector pairing has been started.
31508 1 : A vecload or vector insn has been issued and a candidate for
31509 pairing has been found and moved to the end of the ready
31510 list. */
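/* Illustrative sequence (not from the original source): after a
   vecload issues with vec_pairing == 0, a pairable vector insn on
   the ready list is moved to the end so it issues next and
   vec_pairing becomes 1; the following call then resets the state.
   If no vector insn is ready, another vecload is preferred so like
   insns still share a superslice.  */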
31511 if (type == TYPE_VECLOAD)
31513 /* Issued a vecload. */
31514 if (vec_pairing == 0)
31516 int vecload_pos = -1;
31517 /* We issued a single vecload, look for a vector insn to pair it
31518 with. If one isn't found, try to pair another vecload. */
31519 pos = lastpos;
31520 while (pos >= 0)
31522 if (recog_memoized (ready[pos]) >= 0)
31524 type2 = get_attr_type (ready[pos]);
31525 if (is_power9_pairable_vec_type (type2))
31527 /* Found a vector insn to pair with, move it to the
31528 end of the ready list so it is scheduled next. */
31529 tmp = ready[pos];
31530 for (i = pos; i < lastpos; i++)
31531 ready[i] = ready[i + 1];
31532 ready[lastpos] = tmp;
31533 vec_pairing = 1;
31534 return cached_can_issue_more;
31536 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31537 /* Remember position of first vecload seen. */
31538 vecload_pos = pos;
31540 pos--;
31542 if (vecload_pos >= 0)
31544 /* Didn't find a vector to pair with but did find a vecload,
31545 move it to the end of the ready list. */
31546 tmp = ready[vecload_pos];
31547 for (i = vecload_pos; i < lastpos; i++)
31548 ready[i] = ready[i + 1];
31549 ready[lastpos] = tmp;
31550 vec_pairing = 1;
31551 return cached_can_issue_more;
31555 else if (is_power9_pairable_vec_type (type))
31557 /* Issued a vector operation. */
31558 if (vec_pairing == 0)
31560 int vec_pos = -1;
31561 /* We issued a single vector insn, look for a vecload to pair it
31562 with. If one isn't found, try to pair another vector. */
31563 pos = lastpos;
31564 while (pos >= 0)
31566 if (recog_memoized (ready[pos]) >= 0)
31568 type2 = get_attr_type (ready[pos]);
31569 if (type2 == TYPE_VECLOAD)
31571 /* Found a vecload insn to pair with, move it to the
31572 end of the ready list so it is scheduled next. */
31573 tmp = ready[pos];
31574 for (i = pos; i < lastpos; i++)
31575 ready[i] = ready[i + 1];
31576 ready[lastpos] = tmp;
31577 vec_pairing = 1;
31578 return cached_can_issue_more;
31580 else if (is_power9_pairable_vec_type (type2)
31581 && vec_pos == -1)
31582 /* Remember position of first vector insn seen. */
31583 vec_pos = pos;
31585 pos--;
31587 if (vec_pos >= 0)
31589 /* Didn't find a vecload to pair with but did find a vector
31590 insn, move it to the end of the ready list. */
31591 tmp = ready[vec_pos];
31592 for (i = vec_pos; i < lastpos; i++)
31593 ready[i] = ready[i + 1];
31594 ready[lastpos] = tmp;
31595 vec_pairing = 1;
31596 return cached_can_issue_more;
31601 /* We've either finished a vec/vecload pair, couldn't find an insn to
31602 continue the current pair, or the last insn had nothing to do
31603 with pairing. In any case, reset the state. */
31604 vec_pairing = 0;
31607 return cached_can_issue_more;
31610 /* We are about to begin issuing insns for this clock cycle. */
31612 static int
31613 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31614 rtx_insn **ready ATTRIBUTE_UNUSED,
31615 int *pn_ready ATTRIBUTE_UNUSED,
31616 int clock_var ATTRIBUTE_UNUSED)
31618 int n_ready = *pn_ready;
31620 if (sched_verbose)
31621 fprintf (dump, "// rs6000_sched_reorder :\n");
31623 /* Reorder the ready list, if the second to last ready insn
31624 is a non-pipelined insn. */
31625 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
31627 if (is_nonpipeline_insn (ready[n_ready - 1])
31628 && (recog_memoized (ready[n_ready - 2]) > 0))
31629 /* Simply swap first two insns. */
31630 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31633 if (rs6000_cpu == PROCESSOR_POWER6)
31634 load_store_pendulum = 0;
31636 return rs6000_issue_rate ();
31639 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31641 static int
31642 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31643 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31645 if (sched_verbose)
31646 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31648 /* For Power6, we need to handle some special cases to try to keep the
31649 store queue from overflowing and triggering expensive flushes.
31651 This code monitors how load and store instructions are being issued
31652 and skews the ready list one way or the other to increase the likelihood
31653 that a desired instruction is issued at the proper time.
31655 A couple of things are done. First, we maintain a "load_store_pendulum"
31656 to track the current state of load/store issue.
31658 - If the pendulum is at zero, then no loads or stores have been
31659 issued in the current cycle so we do nothing.
31661 - If the pendulum is 1, then a single load has been issued in this
31662 cycle and we attempt to locate another load in the ready list to
31663 issue with it.
31665 - If the pendulum is -2, then two stores have already been
31666 issued in this cycle, so we increase the priority of the first load
31667 in the ready list to increase its likelihood of being chosen first
31668 in the next cycle.
31670 - If the pendulum is -1, then a single store has been issued in this
31671 cycle and we attempt to locate another store in the ready list to
31672 issue with it, preferring a store to an adjacent memory location to
31673 facilitate store pairing in the store queue.
31675 - If the pendulum is 2, then two loads have already been
31676 issued in this cycle, so we increase the priority of the first store
31677 in the ready list to increase its likelihood of being chosen first
31678 in the next cycle.
31680 - If the pendulum < -2 or > 2, then do nothing.
31682 Note: This code covers the most common scenarios. There exist
31683 non-load/store instructions which make use of the LSU and which
31684 would need to be accounted for to strictly model the behavior
31685 of the machine. Those instructions are currently unaccounted
31686 for to help minimize compile time overhead of this code. */
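/* Illustrative summary of the pendulum states handled below (not
   from the original source):

       -2 : two stores issued; boost the first ready load
       -1 : one store issued; look for another (adjacent) store
        0 : balanced; nothing to do
        1 : one load issued; look for another load
        2 : two loads issued; boost the first ready store  */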
31688 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
31690 int pos;
31691 int i;
31692 rtx_insn *tmp;
31693 rtx load_mem, str_mem;
31695 if (is_store_insn (last_scheduled_insn, &str_mem))
31696 /* Issuing a store, swing the load_store_pendulum to the left */
31697 load_store_pendulum--;
31698 else if (is_load_insn (last_scheduled_insn, &load_mem))
31699 /* Issuing a load, swing the load_store_pendulum to the right */
31700 load_store_pendulum++;
31701 else
31702 return cached_can_issue_more;
31704 /* If the pendulum is balanced, or there is only one instruction on
31705 the ready list, then all is well, so return. */
31706 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31707 return cached_can_issue_more;
31709 if (load_store_pendulum == 1)
31711 /* A load has been issued in this cycle. Scan the ready list
31712 for another load to issue with it */
31713 pos = *pn_ready-1;
31715 while (pos >= 0)
31717 if (is_load_insn (ready[pos], &load_mem))
31719 /* Found a load. Move it to the head of the ready list,
31720 and adjust its priority so that it is more likely to
31721 stay there */
31722 tmp = ready[pos];
31723 for (i=pos; i<*pn_ready-1; i++)
31724 ready[i] = ready[i + 1];
31725 ready[*pn_ready-1] = tmp;
31727 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31728 INSN_PRIORITY (tmp)++;
31729 break;
31731 pos--;
31734 else if (load_store_pendulum == -2)
31736 /* Two stores have been issued in this cycle. Increase the
31737 priority of the first load in the ready list to favor it for
31738 issuing in the next cycle. */
31739 pos = *pn_ready-1;
31741 while (pos >= 0)
31743 if (is_load_insn (ready[pos], &load_mem)
31744 && !sel_sched_p ()
31745 && INSN_PRIORITY_KNOWN (ready[pos]))
31747 INSN_PRIORITY (ready[pos])++;
31749 /* Adjust the pendulum to account for the fact that a load
31750 was found and increased in priority. This is to prevent
31751 increasing the priority of multiple loads */
31752 load_store_pendulum--;
31754 break;
31756 pos--;
31759 else if (load_store_pendulum == -1)
31761 /* A store has been issued in this cycle. Scan the ready list for
31762 another store to issue with it, preferring a store to an adjacent
31763 memory location */
31764 int first_store_pos = -1;
31766 pos = *pn_ready-1;
31768 while (pos >= 0)
31770 if (is_store_insn (ready[pos], &str_mem))
31772 rtx str_mem2;
31773 /* Maintain the index of the first store found on the
31774 list */
31775 if (first_store_pos == -1)
31776 first_store_pos = pos;
31778 if (is_store_insn (last_scheduled_insn, &str_mem2)
31779 && adjacent_mem_locations (str_mem, str_mem2))
31781 /* Found an adjacent store. Move it to the head of the
31782 ready list, and adjust its priority so that it is
31783 more likely to stay there */
31784 tmp = ready[pos];
31785 for (i=pos; i<*pn_ready-1; i++)
31786 ready[i] = ready[i + 1];
31787 ready[*pn_ready-1] = tmp;
31789 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31790 INSN_PRIORITY (tmp)++;
31792 first_store_pos = -1;
31794 break;
31797 pos--;
31800 if (first_store_pos >= 0)
31802 /* An adjacent store wasn't found, but a non-adjacent store was,
31803 so move the non-adjacent store to the front of the ready
31804 list, and adjust its priority so that it is more likely to
31805 stay there. */
31806 tmp = ready[first_store_pos];
31807 for (i=first_store_pos; i<*pn_ready-1; i++)
31808 ready[i] = ready[i + 1];
31809 ready[*pn_ready-1] = tmp;
31810 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31811 INSN_PRIORITY (tmp)++;
31814 else if (load_store_pendulum == 2)
31816 /* Two loads have been issued in this cycle. Increase the priority
31817 of the first store in the ready list to favor it for issuing in
31818 the next cycle. */
31819 pos = *pn_ready-1;
31821 while (pos >= 0)
31823 if (is_store_insn (ready[pos], &str_mem)
31824 && !sel_sched_p ()
31825 && INSN_PRIORITY_KNOWN (ready[pos]))
31827 INSN_PRIORITY (ready[pos])++;
31829 /* Adjust the pendulum to account for the fact that a store
31830 was found and increased in priority. This is to prevent
31831 increasing the priority of multiple stores */
31832 load_store_pendulum++;
31834 break;
31836 pos--;
31841 /* Do Power9 dependent reordering if necessary. */
31842 if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
31843 && recog_memoized (last_scheduled_insn) >= 0)
31844 return power9_sched_reorder2 (ready, *pn_ready - 1);
31846 return cached_can_issue_more;
31849 /* Return whether the presence of INSN causes a dispatch group termination
31850 of group WHICH_GROUP.
31852 If WHICH_GROUP == current_group, this function will return true if INSN
31853 causes the termination of the current group (i.e., the dispatch group to
31854 which INSN belongs). This means that INSN will be the last insn in the
31855 group it belongs to.
31857 If WHICH_GROUP == previous_group, this function will return true if INSN
31858 causes the termination of the previous group (i.e., the dispatch group that
31859 precedes the group to which INSN belongs). This means that INSN will be
31860 the first insn in the group it belongs to. */
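/* Example (illustrative only): on Power4/5 a microcoded insn must be
   both first and last in its group, so it terminates the previous
   group and its own; a branch-slot insn must only be last, so it
   terminates just the current group.  */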
31862 static bool
31863 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31865 bool first, last;
31867 if (! insn)
31868 return false;
31870 first = insn_must_be_first_in_group (insn);
31871 last = insn_must_be_last_in_group (insn);
31873 if (first && last)
31874 return true;
31876 if (which_group == current_group)
31877 return last;
31878 else if (which_group == previous_group)
31879 return first;
31881 return false;
31885 static bool
31886 insn_must_be_first_in_group (rtx_insn *insn)
31888 enum attr_type type;
31890 if (!insn
31891 || NOTE_P (insn)
31892 || DEBUG_INSN_P (insn)
31893 || GET_CODE (PATTERN (insn)) == USE
31894 || GET_CODE (PATTERN (insn)) == CLOBBER)
31895 return false;
31897 switch (rs6000_cpu)
31899 case PROCESSOR_POWER5:
31900 if (is_cracked_insn (insn))
31901 return true;
31902 /* FALLTHRU */
31903 case PROCESSOR_POWER4:
31904 if (is_microcoded_insn (insn))
31905 return true;
31907 if (!rs6000_sched_groups)
31908 return false;
31910 type = get_attr_type (insn);
31912 switch (type)
31914 case TYPE_MFCR:
31915 case TYPE_MFCRF:
31916 case TYPE_MTCR:
31917 case TYPE_DELAYED_CR:
31918 case TYPE_CR_LOGICAL:
31919 case TYPE_MTJMPR:
31920 case TYPE_MFJMPR:
31921 case TYPE_DIV:
31922 case TYPE_LOAD_L:
31923 case TYPE_STORE_C:
31924 case TYPE_ISYNC:
31925 case TYPE_SYNC:
31926 return true;
31927 default:
31928 break;
31930 break;
31931 case PROCESSOR_POWER6:
31932 type = get_attr_type (insn);
31934 switch (type)
31936 case TYPE_EXTS:
31937 case TYPE_CNTLZ:
31938 case TYPE_TRAP:
31939 case TYPE_MUL:
31940 case TYPE_INSERT:
31941 case TYPE_FPCOMPARE:
31942 case TYPE_MFCR:
31943 case TYPE_MTCR:
31944 case TYPE_MFJMPR:
31945 case TYPE_MTJMPR:
31946 case TYPE_ISYNC:
31947 case TYPE_SYNC:
31948 case TYPE_LOAD_L:
31949 case TYPE_STORE_C:
31950 return true;
31951 case TYPE_SHIFT:
31952 if (get_attr_dot (insn) == DOT_NO
31953 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31954 return true;
31955 else
31956 break;
31957 case TYPE_DIV:
31958 if (get_attr_size (insn) == SIZE_32)
31959 return true;
31960 else
31961 break;
31962 case TYPE_LOAD:
31963 case TYPE_STORE:
31964 case TYPE_FPLOAD:
31965 case TYPE_FPSTORE:
31966 if (get_attr_update (insn) == UPDATE_YES)
31967 return true;
31968 else
31969 break;
31970 default:
31971 break;
31973 break;
31974 case PROCESSOR_POWER7:
31975 type = get_attr_type (insn);
31977 switch (type)
31979 case TYPE_CR_LOGICAL:
31980 case TYPE_MFCR:
31981 case TYPE_MFCRF:
31982 case TYPE_MTCR:
31983 case TYPE_DIV:
31984 case TYPE_ISYNC:
31985 case TYPE_LOAD_L:
31986 case TYPE_STORE_C:
31987 case TYPE_MFJMPR:
31988 case TYPE_MTJMPR:
31989 return true;
31990 case TYPE_MUL:
31991 case TYPE_SHIFT:
31992 case TYPE_EXTS:
31993 if (get_attr_dot (insn) == DOT_YES)
31994 return true;
31995 else
31996 break;
31997 case TYPE_LOAD:
31998 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31999 || get_attr_update (insn) == UPDATE_YES)
32000 return true;
32001 else
32002 break;
32003 case TYPE_STORE:
32004 case TYPE_FPLOAD:
32005 case TYPE_FPSTORE:
32006 if (get_attr_update (insn) == UPDATE_YES)
32007 return true;
32008 else
32009 break;
32010 default:
32011 break;
32013 break;
32014 case PROCESSOR_POWER8:
32015 type = get_attr_type (insn);
32017 switch (type)
32019 case TYPE_CR_LOGICAL:
32020 case TYPE_DELAYED_CR:
32021 case TYPE_MFCR:
32022 case TYPE_MFCRF:
32023 case TYPE_MTCR:
32024 case TYPE_SYNC:
32025 case TYPE_ISYNC:
32026 case TYPE_LOAD_L:
32027 case TYPE_STORE_C:
32028 case TYPE_VECSTORE:
32029 case TYPE_MFJMPR:
32030 case TYPE_MTJMPR:
32031 return true;
32032 case TYPE_SHIFT:
32033 case TYPE_EXTS:
32034 case TYPE_MUL:
32035 if (get_attr_dot (insn) == DOT_YES)
32036 return true;
32037 else
32038 break;
32039 case TYPE_LOAD:
32040 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32041 || get_attr_update (insn) == UPDATE_YES)
32042 return true;
32043 else
32044 break;
32045 case TYPE_STORE:
32046 if (get_attr_update (insn) == UPDATE_YES
32047 && get_attr_indexed (insn) == INDEXED_YES)
32048 return true;
32049 else
32050 break;
32051 default:
32052 break;
32054 break;
32055 default:
32056 break;
32059 return false;
32062 static bool
32063 insn_must_be_last_in_group (rtx_insn *insn)
32065 enum attr_type type;
32067 if (!insn
32068 || NOTE_P (insn)
32069 || DEBUG_INSN_P (insn)
32070 || GET_CODE (PATTERN (insn)) == USE
32071 || GET_CODE (PATTERN (insn)) == CLOBBER)
32072 return false;
32074 switch (rs6000_cpu) {
32075 case PROCESSOR_POWER4:
32076 case PROCESSOR_POWER5:
32077 if (is_microcoded_insn (insn))
32078 return true;
32080 if (is_branch_slot_insn (insn))
32081 return true;
32083 break;
32084 case PROCESSOR_POWER6:
32085 type = get_attr_type (insn);
32087 switch (type)
32089 case TYPE_EXTS:
32090 case TYPE_CNTLZ:
32091 case TYPE_TRAP:
32092 case TYPE_MUL:
32093 case TYPE_FPCOMPARE:
32094 case TYPE_MFCR:
32095 case TYPE_MTCR:
32096 case TYPE_MFJMPR:
32097 case TYPE_MTJMPR:
32098 case TYPE_ISYNC:
32099 case TYPE_SYNC:
32100 case TYPE_LOAD_L:
32101 case TYPE_STORE_C:
32102 return true;
32103 case TYPE_SHIFT:
32104 if (get_attr_dot (insn) == DOT_NO
32105 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
32106 return true;
32107 else
32108 break;
32109 case TYPE_DIV:
32110 if (get_attr_size (insn) == SIZE_32)
32111 return true;
32112 else
32113 break;
32114 default:
32115 break;
32117 break;
32118 case PROCESSOR_POWER7:
32119 type = get_attr_type (insn);
32121 switch (type)
32123 case TYPE_ISYNC:
32124 case TYPE_SYNC:
32125 case TYPE_LOAD_L:
32126 case TYPE_STORE_C:
32127 return true;
32128 case TYPE_LOAD:
32129 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32130 && get_attr_update (insn) == UPDATE_YES)
32131 return true;
32132 else
32133 break;
32134 case TYPE_STORE:
32135 if (get_attr_update (insn) == UPDATE_YES
32136 && get_attr_indexed (insn) == INDEXED_YES)
32137 return true;
32138 else
32139 break;
32140 default:
32141 break;
32143 break;
32144 case PROCESSOR_POWER8:
32145 type = get_attr_type (insn);
32147 switch (type)
32149 case TYPE_MFCR:
32150 case TYPE_MTCR:
32151 case TYPE_ISYNC:
32152 case TYPE_SYNC:
32153 case TYPE_LOAD_L:
32154 case TYPE_STORE_C:
32155 return true;
32156 case TYPE_LOAD:
32157 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32158 && get_attr_update (insn) == UPDATE_YES)
32159 return true;
32160 else
32161 break;
32162 case TYPE_STORE:
32163 if (get_attr_update (insn) == UPDATE_YES
32164 && get_attr_indexed (insn) == INDEXED_YES)
32165 return true;
32166 else
32167 break;
32168 default:
32169 break;
32171 break;
32172 default:
32173 break;
32176 return false;
32179 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
32180 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
32182 static bool
32183 is_costly_group (rtx *group_insns, rtx next_insn)
32185 int i;
32186 int issue_rate = rs6000_issue_rate ();
32188 for (i = 0; i < issue_rate; i++)
32190 sd_iterator_def sd_it;
32191 dep_t dep;
32192 rtx insn = group_insns[i];
32194 if (!insn)
32195 continue;
32197 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
32199 rtx next = DEP_CON (dep);
32201 if (next == next_insn
32202 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
32203 return true;
32207 return false;
32210 /* Helper for the function redefine_groups.
32211 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
32212 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
32213 to keep it "far" (in a separate group) from GROUP_INSNS, following
32214 one of the following schemes, depending on the value of the flag
32215 -minsert-sched-nops = X:
32216 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
32217 in order to force NEXT_INSN into a separate group.
32218 (2) X < sched_finish_regroup_exact: insert exactly X nops.
32219 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
32220 insertion (has a group just ended, how many vacant issue slots remain in the
32221 last group, and how many dispatch groups were encountered so far). */
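/* Worked example (illustrative only): with -minsert-sched-nops=2 and
   a costly dependence on NEXT_INSN, exactly two nops are emitted
   before it; nops cannot occupy the branch slot, and whenever the
   slots fill up while emitting them, *group_count is bumped and the
   vacant-slot count resets to issue_rate - 1.  */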
32223 static int
32224 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
32225 rtx_insn *next_insn, bool *group_end, int can_issue_more,
32226 int *group_count)
32228 rtx nop;
32229 bool force;
32230 int issue_rate = rs6000_issue_rate ();
32231 bool end = *group_end;
32232 int i;
32234 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
32235 return can_issue_more;
32237 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
32238 return can_issue_more;
32240 force = is_costly_group (group_insns, next_insn);
32241 if (!force)
32242 return can_issue_more;
32244 if (sched_verbose > 6)
32245 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
32246 *group_count ,can_issue_more);
32248 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
32250 if (*group_end)
32251 can_issue_more = 0;
32253 /* Since only a branch can be issued in the last issue_slot, it is
32254 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
32255 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
32256 in this case the last nop will start a new group and the branch
32257 will be forced to the new group. */
32258 if (can_issue_more && !is_branch_slot_insn (next_insn))
32259 can_issue_more--;
32261 /* Do we have a special group ending nop? */
32262 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
32263 || rs6000_cpu_attr == CPU_POWER8)
32265 nop = gen_group_ending_nop ();
32266 emit_insn_before (nop, next_insn);
32267 can_issue_more = 0;
32269 else
32270 while (can_issue_more > 0)
32272 nop = gen_nop ();
32273 emit_insn_before (nop, next_insn);
32274 can_issue_more--;
32277 *group_end = true;
32278 return 0;
32281 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
32283 int n_nops = rs6000_sched_insert_nops;
32285 /* Nops can't be issued from the branch slot, so the effective
32286 issue_rate for nops is 'issue_rate - 1'. */
32287 if (can_issue_more == 0)
32288 can_issue_more = issue_rate;
32289 can_issue_more--;
32290 if (can_issue_more == 0)
32292 can_issue_more = issue_rate - 1;
32293 (*group_count)++;
32294 end = true;
32295 for (i = 0; i < issue_rate; i++)
32297 group_insns[i] = 0;
32301 while (n_nops > 0)
32303 nop = gen_nop ();
32304 emit_insn_before (nop, next_insn);
32305 if (can_issue_more == issue_rate - 1) /* new group begins */
32306 end = false;
32307 can_issue_more--;
32308 if (can_issue_more == 0)
32310 can_issue_more = issue_rate - 1;
32311 (*group_count)++;
32312 end = true;
32313 for (i = 0; i < issue_rate; i++)
32315 group_insns[i] = 0;
32318 n_nops--;
32321 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32322 can_issue_more++;
32324 /* Is next_insn going to start a new group? */
32325 *group_end
32326 = (end
32327 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32328 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32329 || (can_issue_more < issue_rate &&
32330 insn_terminates_group_p (next_insn, previous_group)));
32331 if (*group_end && end)
32332 (*group_count)--;
32334 if (sched_verbose > 6)
32335 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
32336 *group_count, can_issue_more);
32337 return can_issue_more;
32340 return can_issue_more;
32343 /* This function tries to synch the dispatch groups that the compiler "sees"
32344 with the dispatch groups that the processor dispatcher is expected to
32345 form in practice. It tries to achieve this synchronization by forcing the
32346 estimated processor grouping on the compiler (as opposed to the function
32347 'pad_groups' which tries to force the scheduler's grouping on the processor).
32349 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32350 examines the (estimated) dispatch groups that will be formed by the processor
32351 dispatcher. It marks these group boundaries to reflect the estimated
32352 processor grouping, overriding the grouping that the scheduler had marked.
32353 Depending on the value of the flag '-minsert-sched-nops' this function can
32354 force certain insns into separate groups or force a certain distance between
32355 them by inserting nops, for example, if there exists a "costly dependence"
32356 between the insns.
32358 The function estimates the group boundaries that the processor will form as
32359 follows: It keeps track of how many vacant issue slots are available after
32360 each insn. A subsequent insn will start a new group if one of the following
32361 4 cases applies:
32362 - no more vacant issue slots remain in the current dispatch group.
32363 - only the last issue slot, which is the branch slot, is vacant, but the next
32364 insn is not a branch.
32365 - only the last 2 or less issue slots, including the branch slot, are vacant,
32366 which means that a cracked insn (which occupies two issue slots) can't be
32367 issued in this group.
32368 - less than 'issue_rate' slots are vacant, and the next insn always needs to
32369 start a new group. */
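/* Worked example (illustrative only): with Power4's issue rate of 5,
   after four ordinary insns only the branch slot is vacant
   (can_issue_more == 1), so a non-branch next insn starts a new
   group; a cracked insn needs two slots, so it starts a new group
   whenever can_issue_more <= 2.  */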
32371 static int
32372 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32373 rtx_insn *tail)
32375 rtx_insn *insn, *next_insn;
32376 int issue_rate;
32377 int can_issue_more;
32378 int slot, i;
32379 bool group_end;
32380 int group_count = 0;
32381 rtx *group_insns;
32383 /* Initialize. */
32384 issue_rate = rs6000_issue_rate ();
32385 group_insns = XALLOCAVEC (rtx, issue_rate);
32386 for (i = 0; i < issue_rate; i++)
32388 group_insns[i] = 0;
32390 can_issue_more = issue_rate;
32391 slot = 0;
32392 insn = get_next_active_insn (prev_head_insn, tail);
32393 group_end = false;
32395 while (insn != NULL_RTX)
32397 slot = (issue_rate - can_issue_more);
32398 group_insns[slot] = insn;
32399 can_issue_more =
32400 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32401 if (insn_terminates_group_p (insn, current_group))
32402 can_issue_more = 0;
32404 next_insn = get_next_active_insn (insn, tail);
32405 if (next_insn == NULL_RTX)
32406 return group_count + 1;
32408 /* Is next_insn going to start a new group? */
32409 group_end
32410 = (can_issue_more == 0
32411 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32412 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32413 || (can_issue_more < issue_rate &&
32414 insn_terminates_group_p (next_insn, previous_group)));
32416 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32417 next_insn, &group_end, can_issue_more,
32418 &group_count);
32420 if (group_end)
32422 group_count++;
32423 can_issue_more = 0;
32424 for (i = 0; i < issue_rate; i++)
32426 group_insns[i] = 0;
32430 if (GET_MODE (next_insn) == TImode && can_issue_more)
32431 PUT_MODE (next_insn, VOIDmode);
32432 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32433 PUT_MODE (next_insn, TImode);
32435 insn = next_insn;
32436 if (can_issue_more == 0)
32437 can_issue_more = issue_rate;
32438 } /* while */
32440 return group_count;
32443 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32444 dispatch group boundaries that the scheduler had marked. Pad with nops
32445 any dispatch groups which have vacant issue slots, in order to force the
32446 scheduler's grouping on the processor dispatcher. The function
32447 returns the number of dispatch groups found. */
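/* Example (illustrative only): if the scheduler marked a group
   boundary while two of five slots were still vacant, one nop is
   emitted when the next insn is not a branch (only the branch slot
   is left empty); if it is a branch, two nops are emitted so the
   last nop spills into a fresh group and drags the branch with it.  */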
32449 static int
32450 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32451 rtx_insn *tail)
32453 rtx_insn *insn, *next_insn;
32454 rtx nop;
32455 int issue_rate;
32456 int can_issue_more;
32457 int group_end;
32458 int group_count = 0;
32460 /* Initialize issue_rate. */
32461 issue_rate = rs6000_issue_rate ();
32462 can_issue_more = issue_rate;
32464 insn = get_next_active_insn (prev_head_insn, tail);
32465 next_insn = get_next_active_insn (insn, tail);
32467 while (insn != NULL_RTX)
32469 can_issue_more =
32470 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32472 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32474 if (next_insn == NULL_RTX)
32475 break;
32477 if (group_end)
32479 /* If the scheduler had marked group termination at this location
32480 (between insn and next_insn), and neither insn nor next_insn will
32481 force group termination, pad the group with nops to force group
32482 termination. */
32483 if (can_issue_more
32484 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32485 && !insn_terminates_group_p (insn, current_group)
32486 && !insn_terminates_group_p (next_insn, previous_group))
32488 if (!is_branch_slot_insn (next_insn))
32489 can_issue_more--;
32491 while (can_issue_more)
32493 nop = gen_nop ();
32494 emit_insn_before (nop, next_insn);
32495 can_issue_more--;
32499 can_issue_more = issue_rate;
32500 group_count++;
32503 insn = next_insn;
32504 next_insn = get_next_active_insn (insn, tail);
32507 return group_count;
32510 /* We're beginning a new block. Initialize data structures as necessary. */
32512 static void
32513 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32514 int sched_verbose ATTRIBUTE_UNUSED,
32515 int max_ready ATTRIBUTE_UNUSED)
32517 last_scheduled_insn = NULL;
32518 load_store_pendulum = 0;
32519 divide_cnt = 0;
32520 vec_pairing = 0;
32523 /* The following function is called at the end of scheduling BB.
32524 After reload, it inserts nops at dispatch group boundaries. */
32526 static void
32527 rs6000_sched_finish (FILE *dump, int sched_verbose)
32529 int n_groups;
32531 if (sched_verbose)
32532 fprintf (dump, "=== Finishing schedule.\n");
32534 if (reload_completed && rs6000_sched_groups)
32536 /* Do not run sched_finish hook when selective scheduling enabled. */
32537 if (sel_sched_p ())
32538 return;
32540 if (rs6000_sched_insert_nops == sched_finish_none)
32541 return;
32543 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32544 n_groups = pad_groups (dump, sched_verbose,
32545 current_sched_info->prev_head,
32546 current_sched_info->next_tail);
32547 else
32548 n_groups = redefine_groups (dump, sched_verbose,
32549 current_sched_info->prev_head,
32550 current_sched_info->next_tail);
32552 if (sched_verbose >= 6)
32554 fprintf (dump, "ngroups = %d\n", n_groups);
32555 print_rtl (dump, current_sched_info->prev_head);
32556 fprintf (dump, "Done finish_sched\n");
32561 struct rs6000_sched_context
32563 short cached_can_issue_more;
32564 rtx_insn *last_scheduled_insn;
32565 int load_store_pendulum;
32566 int divide_cnt;
32567 int vec_pairing;
32570 typedef struct rs6000_sched_context rs6000_sched_context_def;
32571 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32573 /* Allocate store for new scheduling context. */
32574 static void *
32575 rs6000_alloc_sched_context (void)
32577 return xmalloc (sizeof (rs6000_sched_context_def));
32580 /* If CLEAN_P is true, initialize _SC with clean data;
32581 otherwise initialize it from the global scheduling context. */
32582 static void
32583 rs6000_init_sched_context (void *_sc, bool clean_p)
32585 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32587 if (clean_p)
32589 sc->cached_can_issue_more = 0;
32590 sc->last_scheduled_insn = NULL;
32591 sc->load_store_pendulum = 0;
32592 sc->divide_cnt = 0;
32593 sc->vec_pairing = 0;
32595 else
32597 sc->cached_can_issue_more = cached_can_issue_more;
32598 sc->last_scheduled_insn = last_scheduled_insn;
32599 sc->load_store_pendulum = load_store_pendulum;
32600 sc->divide_cnt = divide_cnt;
32601 sc->vec_pairing = vec_pairing;
32605 /* Sets the global scheduling context to the one pointed to by _SC. */
32606 static void
32607 rs6000_set_sched_context (void *_sc)
32609 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32611 gcc_assert (sc != NULL);
32613 cached_can_issue_more = sc->cached_can_issue_more;
32614 last_scheduled_insn = sc->last_scheduled_insn;
32615 load_store_pendulum = sc->load_store_pendulum;
32616 divide_cnt = sc->divide_cnt;
32617 vec_pairing = sc->vec_pairing;
32620 /* Free _SC. */
32621 static void
32622 rs6000_free_sched_context (void *_sc)
32624 gcc_assert (_sc != NULL);
32626 free (_sc);
32629 static bool
32630 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32632 switch (get_attr_type (insn))
32634 case TYPE_DIV:
32635 case TYPE_SDIV:
32636 case TYPE_DDIV:
32637 case TYPE_VECDIV:
32638 case TYPE_SSQRT:
32639 case TYPE_DSQRT:
32640 return false;
32642 default:
32643 return true;
32647 /* Length in bytes of the trampoline for entering a nested function. */
32650 rs6000_trampoline_size (void)
32652 int ret = 0;
32654 switch (DEFAULT_ABI)
32656 default:
32657 gcc_unreachable ();
32659 case ABI_AIX:
32660 ret = (TARGET_32BIT) ? 12 : 24;
32661 break;
32663 case ABI_ELFv2:
32664 gcc_assert (!TARGET_32BIT);
32665 ret = 32;
32666 break;
32668 case ABI_DARWIN:
32669 case ABI_V4:
32670 ret = (TARGET_32BIT) ? 40 : 48;
32671 break;
32674 return ret;
32677 /* Emit RTL insns to initialize the variable parts of a trampoline.
32678 FNADDR is an RTX for the address of the function's pure code.
32679 CXT is an RTX for the static chain value for the function. */
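/* Illustrative layout (not from the original source) of the 3-word
   AIX descriptor built below, where regsize is 4 (-m32) or 8 (-m64):

       tramp + 0*regsize : function entry address (fn_reg)
       tramp + 1*regsize : TOC pointer            (toc_reg)
       tramp + 2*regsize : static chain           (ctx_reg)  */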
32681 static void
32682 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32684 int regsize = (TARGET_32BIT) ? 4 : 8;
32685 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32686 rtx ctx_reg = force_reg (Pmode, cxt);
32687 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32689 switch (DEFAULT_ABI)
32691 default:
32692 gcc_unreachable ();
32694 /* Under AIX, just build the 3-word function descriptor. */
32695 case ABI_AIX:
32697 rtx fnmem, fn_reg, toc_reg;
32699 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32700 error ("you cannot take the address of a nested function if you use "
32701 "the %qs option", "-mno-pointers-to-nested-functions");
32703 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32704 fn_reg = gen_reg_rtx (Pmode);
32705 toc_reg = gen_reg_rtx (Pmode);
32707 /* Macro to shorten the code expansions below. */
32708 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32710 m_tramp = replace_equiv_address (m_tramp, addr);
32712 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32713 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32714 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32715 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32716 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32718 # undef MEM_PLUS
32720 break;
32722 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32723 case ABI_ELFv2:
32724 case ABI_DARWIN:
32725 case ABI_V4:
32726 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32727 LCT_NORMAL, VOIDmode,
32728 addr, Pmode,
32729 GEN_INT (rs6000_trampoline_size ()), SImode,
32730 fnaddr, Pmode,
32731 ctx_reg, Pmode);
32732 break;
32737 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32738 identifier as an argument, so the front end shouldn't look it up. */
32740 static bool
32741 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32743 return is_attribute_p ("altivec", attr_id);
32746 /* Handle the "altivec" attribute. The attribute may have
32747 arguments as follows:
32749 __attribute__((altivec(vector__)))
32750 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32751 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32753 and may appear more than once (e.g., 'vector bool char') in a
32754 given declaration. */
32756 static tree
32757 rs6000_handle_altivec_attribute (tree *node,
32758 tree name ATTRIBUTE_UNUSED,
32759 tree args,
32760 int flags ATTRIBUTE_UNUSED,
32761 bool *no_add_attrs)
32763 tree type = *node, result = NULL_TREE;
32764 machine_mode mode;
32765 int unsigned_p;
32766 char altivec_type
32767 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32768 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32769 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32770 : '?');
32772 while (POINTER_TYPE_P (type)
32773 || TREE_CODE (type) == FUNCTION_TYPE
32774 || TREE_CODE (type) == METHOD_TYPE
32775 || TREE_CODE (type) == ARRAY_TYPE)
32776 type = TREE_TYPE (type);
32778 mode = TYPE_MODE (type);
32780 /* Check for invalid AltiVec type qualifiers. */
32781 if (type == long_double_type_node)
32782 error ("use of %<long double%> in AltiVec types is invalid");
32783 else if (type == boolean_type_node)
32784 error ("use of boolean types in AltiVec types is invalid");
32785 else if (TREE_CODE (type) == COMPLEX_TYPE)
32786 error ("use of %<complex%> in AltiVec types is invalid");
32787 else if (DECIMAL_FLOAT_MODE_P (mode))
32788 error ("use of decimal floating point types in AltiVec types is invalid");
32789 else if (!TARGET_VSX)
32791 if (type == long_unsigned_type_node || type == long_integer_type_node)
32793 if (TARGET_64BIT)
32794 error ("use of %<long%> in AltiVec types is invalid for "
32795 "64-bit code without %qs", "-mvsx");
32796 else if (rs6000_warn_altivec_long)
32797 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32798 "use %<int%>");
32800 else if (type == long_long_unsigned_type_node
32801 || type == long_long_integer_type_node)
32802 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32803 "-mvsx");
32804 else if (type == double_type_node)
32805 error ("use of %<double%> in AltiVec types is invalid without %qs",
32806 "-mvsx");
32809 switch (altivec_type)
32811 case 'v':
32812 unsigned_p = TYPE_UNSIGNED (type);
32813 switch (mode)
32815 case E_TImode:
32816 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32817 break;
32818 case E_DImode:
32819 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32820 break;
32821 case E_SImode:
32822 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32823 break;
32824 case E_HImode:
32825 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32826 break;
32827 case E_QImode:
32828 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32829 break;
32830 case E_SFmode: result = V4SF_type_node; break;
32831 case E_DFmode: result = V2DF_type_node; break;
32832 /* If the user says 'vector int bool', we may be handed the 'bool'
32833 attribute _before_ the 'vector' attribute, and so select the
32834 proper type in the 'b' case below. */
32835 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32836 case E_V2DImode: case E_V2DFmode:
32837 result = type;
32838 default: break;
32840 break;
32841 case 'b':
32842 switch (mode)
32844 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32845 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32846 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32847 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32848 default: break;
32850 break;
32851 case 'p':
32852 switch (mode)
32854 case E_V8HImode: result = pixel_V8HI_type_node;
32855 default: break;
32857 default: break;
32860 /* Propagate qualifiers attached to the element type
32861 onto the vector type. */
32862 if (result && result != type && TYPE_QUALS (type))
32863 result = build_qualified_type (result, TYPE_QUALS (type));
32865 *no_add_attrs = true; /* No need to hang on to the attribute. */
32867 if (result)
32868 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32870 return NULL_TREE;
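/* Illustrative aside, not part of the original source: the attribute
   forms handled above are what the 'vector'/'pixel'/'bool' keywords
   expand to, e.g. (assuming -maltivec):

	__attribute__ ((altivec (vector__))) int vsi;		// vector int (V4SI)
	__attribute__ ((altivec (vector__))) float vsf;		// vector float (V4SF)
	__attribute__ ((altivec (bool__))) unsigned int vbi;	// vector bool int
	__attribute__ ((altivec (pixel__))) unsigned short vp;	// vector pixel
*/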
32873 /* AltiVec defines four built-in scalar types that serve as vector
32874 elements; we must teach the compiler how to mangle them. */
32876 static const char *
32877 rs6000_mangle_type (const_tree type)
32879 type = TYPE_MAIN_VARIANT (type);
32881 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32882 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32883 return NULL;
32885 if (type == bool_char_type_node) return "U6__boolc";
32886 if (type == bool_short_type_node) return "U6__bools";
32887 if (type == pixel_type_node) return "u7__pixel";
32888 if (type == bool_int_type_node) return "U6__booli";
32889 if (type == bool_long_type_node) return "U6__booll";
32891 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32892 "g" for IBM extended double, no matter whether it is long double (using
32893 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32894 if (TARGET_FLOAT128_TYPE)
32896 if (type == ieee128_float_type_node)
32897 return "U10__float128";
32899 if (TARGET_LONG_DOUBLE_128)
32901 if (type == long_double_type_node)
32902 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
32904 if (type == ibm128_float_type_node)
32905 return "g";
32909 /* Mangle IBM extended float long double as `g' (__float128) on
32910 powerpc*-linux where long-double-64 previously was the default. */
32911 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
32912 && TARGET_ELF
32913 && TARGET_LONG_DOUBLE_128
32914 && !TARGET_IEEEQUAD)
32915 return "g";
32917 /* For all other types, use normal C++ mangling. */
32918 return NULL;
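/* Illustrative aside, not part of the original source: per the table
   above, a C++ parameter of type 'vector bool int' mangles its element
   type as "U6__booli" and 'vector pixel' as "u7__pixel"; '__float128'
   mangles as "U10__float128" when TARGET_FLOAT128_TYPE, and an
   IBM-format 128-bit 'long double' as "g".  */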
32921 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32922 struct attribute_spec.handler. */
32924 static tree
32925 rs6000_handle_longcall_attribute (tree *node, tree name,
32926 tree args ATTRIBUTE_UNUSED,
32927 int flags ATTRIBUTE_UNUSED,
32928 bool *no_add_attrs)
32930 if (TREE_CODE (*node) != FUNCTION_TYPE
32931 && TREE_CODE (*node) != FIELD_DECL
32932 && TREE_CODE (*node) != TYPE_DECL)
32934 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32935 name);
32936 *no_add_attrs = true;
32939 return NULL_TREE;
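/* Illustrative aside, not part of the original source: a typical use of
   the attribute validated above:

	void far_away (void) __attribute__ ((longcall));

   Calls to far_away are then made through a register rather than a
   direct 'bl', so they are not limited by the branch displacement range;
   -mlongcall (rs6000_default_long_calls) applies the same treatment to
   every call, see rs6000_set_default_type_attributes below.  */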
32942 /* Set longcall attributes on all functions declared when
32943 rs6000_default_long_calls is true. */
32944 static void
32945 rs6000_set_default_type_attributes (tree type)
32947 if (rs6000_default_long_calls
32948 && (TREE_CODE (type) == FUNCTION_TYPE
32949 || TREE_CODE (type) == METHOD_TYPE))
32950 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32951 NULL_TREE,
32952 TYPE_ATTRIBUTES (type));
32954 #if TARGET_MACHO
32955 darwin_set_default_type_attributes (type);
32956 #endif
32959 /* Return a reference suitable for calling a function with the
32960 longcall attribute. */
32963 rs6000_longcall_ref (rtx call_ref)
32965 const char *call_name;
32966 tree node;
32968 if (GET_CODE (call_ref) != SYMBOL_REF)
32969 return call_ref;
32971 /* System V adds '.' to the internal name, so skip any leading dots. */
32972 call_name = XSTR (call_ref, 0);
32973 if (*call_name == '.')
32975 while (*call_name == '.')
32976 call_name++;
32978 node = get_identifier (call_name);
32979 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32982 return force_reg (Pmode, call_ref);
32985 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32986 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32987 #endif
32989 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32990 struct attribute_spec.handler. */
32991 static tree
32992 rs6000_handle_struct_attribute (tree *node, tree name,
32993 tree args ATTRIBUTE_UNUSED,
32994 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32996 tree *type = NULL;
32997 if (DECL_P (*node))
32999 if (TREE_CODE (*node) == TYPE_DECL)
33000 type = &TREE_TYPE (*node);
33002 else
33003 type = node;
33005 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
33006 || TREE_CODE (*type) == UNION_TYPE)))
33008 warning (OPT_Wattributes, "%qE attribute ignored", name);
33009 *no_add_attrs = true;
33012 else if ((is_attribute_p ("ms_struct", name)
33013 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
33014 || ((is_attribute_p ("gcc_struct", name)
33015 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
33017 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
33018 name);
33019 *no_add_attrs = true;
33022 return NULL_TREE;
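/* Illustrative aside, not part of the original source:

	struct __attribute__ ((ms_struct)) S { char c; int i : 8; };

   lays S out with the MS bit-field rules tested by
   rs6000_ms_bitfield_layout_p below; requesting both 'ms_struct' and
   'gcc_struct' on one type triggers the "incompatible attribute"
   warning above.  */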
33025 static bool
33026 rs6000_ms_bitfield_layout_p (const_tree record_type)
33028 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
33029 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
33030 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
33033 #ifdef USING_ELFOS_H
33035 /* A get_unnamed_section callback, used for switching to toc_section. */
33037 static void
33038 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33040 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33041 && TARGET_MINIMAL_TOC)
33043 if (!toc_initialized)
33045 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
33046 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33047 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
33048 fprintf (asm_out_file, "\t.tc ");
33049 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
33050 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33051 fprintf (asm_out_file, "\n");
33053 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33054 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33055 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33056 fprintf (asm_out_file, " = .+32768\n");
33057 toc_initialized = 1;
33059 else
33060 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33062 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33064 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
33065 if (!toc_initialized)
33067 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33068 toc_initialized = 1;
33071 else
33073 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33074 if (!toc_initialized)
33076 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33077 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33078 fprintf (asm_out_file, " = .+32768\n");
33079 toc_initialized = 1;
33084 /* Implement TARGET_ASM_INIT_SECTIONS. */
33086 static void
33087 rs6000_elf_asm_init_sections (void)
33089 toc_section
33090 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
33092 sdata2_section
33093 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
33094 SDATA2_SECTION_ASM_OP);
33097 /* Implement TARGET_SELECT_RTX_SECTION. */
33099 static section *
33100 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
33101 unsigned HOST_WIDE_INT align)
33103 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33104 return toc_section;
33105 else
33106 return default_elf_select_rtx_section (mode, x, align);
33109 /* For a SYMBOL_REF, set generic flags and then perform some
33110 target-specific processing.
33112 When the AIX ABI is requested on a non-AIX system, replace the
33113 function name with the real name (with a leading .) rather than the
33114 function descriptor name. This saves a lot of overriding code to
33115 read the prefixes. */
33117 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
33118 static void
33119 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
33121 default_encode_section_info (decl, rtl, first);
33123 if (first
33124 && TREE_CODE (decl) == FUNCTION_DECL
33125 && !TARGET_AIX
33126 && DEFAULT_ABI == ABI_AIX)
33128 rtx sym_ref = XEXP (rtl, 0);
33129 size_t len = strlen (XSTR (sym_ref, 0));
33130 char *str = XALLOCAVEC (char, len + 2);
33131 str[0] = '.';
33132 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
33133 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
33137 static inline bool
33138 compare_section_name (const char *section, const char *templ)
33140 int len;
33142 len = strlen (templ);
33143 return (strncmp (section, templ, len) == 0
33144 && (section[len] == 0 || section[len] == '.'));
33147 bool
33148 rs6000_elf_in_small_data_p (const_tree decl)
33150 if (rs6000_sdata == SDATA_NONE)
33151 return false;
33153 /* We want to merge strings, so we never consider them small data. */
33154 if (TREE_CODE (decl) == STRING_CST)
33155 return false;
33157 /* Functions are never in the small data area. */
33158 if (TREE_CODE (decl) == FUNCTION_DECL)
33159 return false;
33161 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
33163 const char *section = DECL_SECTION_NAME (decl);
33164 if (compare_section_name (section, ".sdata")
33165 || compare_section_name (section, ".sdata2")
33166 || compare_section_name (section, ".gnu.linkonce.s")
33167 || compare_section_name (section, ".sbss")
33168 || compare_section_name (section, ".sbss2")
33169 || compare_section_name (section, ".gnu.linkonce.sb")
33170 || strcmp (section, ".PPC.EMB.sdata0") == 0
33171 || strcmp (section, ".PPC.EMB.sbss0") == 0)
33172 return true;
33174 else
33176 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
33178 if (size > 0
33179 && size <= g_switch_value
33180 /* If it's not public, and we're not going to reference it there,
33181 there's no need to put it in the small data section. */
33182 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
33183 return true;
33186 return false;
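/* Illustrative aside, not part of the original source: with -msdata and
   -G 8 on an SVR4 target, a hypothetical 'int counter;' (4 bytes, within
   the limit) is placed in the small data area and addressed off the
   small-data base register, while a 4 KB array exceeds g_switch_value
   and stays in ordinary .data/.bss.  */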
33189 #endif /* USING_ELFOS_H */
33191 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
33193 static bool
33194 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
33196 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
33199 /* Do not place thread-local symbols refs in the object blocks. */
33201 static bool
33202 rs6000_use_blocks_for_decl_p (const_tree decl)
33204 return !DECL_THREAD_LOCAL_P (decl);
33207 /* Return a REG that occurs in ADDR with coefficient 1.
33208 ADDR can be effectively incremented by incrementing REG.
33210 r0 is special and we must not select it as an address
33211 register by this routine since our caller will try to
33212 increment the returned register via an "la" instruction. */
33215 find_addr_reg (rtx addr)
33217 while (GET_CODE (addr) == PLUS)
33219 if (GET_CODE (XEXP (addr, 0)) == REG
33220 && REGNO (XEXP (addr, 0)) != 0)
33221 addr = XEXP (addr, 0);
33222 else if (GET_CODE (XEXP (addr, 1)) == REG
33223 && REGNO (XEXP (addr, 1)) != 0)
33224 addr = XEXP (addr, 1);
33225 else if (CONSTANT_P (XEXP (addr, 0)))
33226 addr = XEXP (addr, 1);
33227 else if (CONSTANT_P (XEXP (addr, 1)))
33228 addr = XEXP (addr, 0);
33229 else
33230 gcc_unreachable ();
33232 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
33233 return addr;
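/* Illustrative aside, not part of the original source: for
   ADDR = (plus (reg 9) (const_int 16)) the loop above returns (reg 9);
   for (plus (plus (reg 9) (reg 10)) (const_int 4)) it first strips the
   constant, then picks (reg 9).  r0 is skipped because 'la' would read
   it as the literal value zero.  */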
33236 void
33237 rs6000_fatal_bad_address (rtx op)
33239 fatal_insn ("bad address", op);
33242 #if TARGET_MACHO
33244 typedef struct branch_island_d {
33245 tree function_name;
33246 tree label_name;
33247 int line_number;
33248 } branch_island;
33251 static vec<branch_island, va_gc> *branch_islands;
33253 /* Remember to generate a branch island for far calls to the given
33254 function. */
33256 static void
33257 add_compiler_branch_island (tree label_name, tree function_name,
33258 int line_number)
33260 branch_island bi = {function_name, label_name, line_number};
33261 vec_safe_push (branch_islands, bi);
33264 /* Generate far-jump branch islands for everything recorded in
33265 branch_islands. Invoked immediately after the last instruction of
33266 the epilogue has been emitted; the branch islands must be appended
33267 to, and contiguous with, the function body. Mach-O stubs are
33268 generated in machopic_output_stub(). */
33270 static void
33271 macho_branch_islands (void)
33273 char tmp_buf[512];
33275 while (!vec_safe_is_empty (branch_islands))
33277 branch_island *bi = &branch_islands->last ();
33278 const char *label = IDENTIFIER_POINTER (bi->label_name);
33279 const char *name = IDENTIFIER_POINTER (bi->function_name);
33280 char name_buf[512];
33281 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33282 if (name[0] == '*' || name[0] == '&')
33283 strcpy (name_buf, name+1);
33284 else
33286 name_buf[0] = '_';
33287 strcpy (name_buf+1, name);
33289 strcpy (tmp_buf, "\n");
33290 strcat (tmp_buf, label);
33291 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33292 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33293 dbxout_stabd (N_SLINE, bi->line_number);
33294 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33295 if (flag_pic)
33297 if (TARGET_LINK_STACK)
33299 char name[32];
33300 get_ppc476_thunk_name (name);
33301 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
33302 strcat (tmp_buf, name);
33303 strcat (tmp_buf, "\n");
33304 strcat (tmp_buf, label);
33305 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33307 else
33309 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33310 strcat (tmp_buf, label);
33311 strcat (tmp_buf, "_pic\n");
33312 strcat (tmp_buf, label);
33313 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33316 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33317 strcat (tmp_buf, name_buf);
33318 strcat (tmp_buf, " - ");
33319 strcat (tmp_buf, label);
33320 strcat (tmp_buf, "_pic)\n");
33322 strcat (tmp_buf, "\tmtlr r0\n");
33324 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33325 strcat (tmp_buf, name_buf);
33326 strcat (tmp_buf, " - ");
33327 strcat (tmp_buf, label);
33328 strcat (tmp_buf, "_pic)\n");
33330 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33332 else
33334 strcat (tmp_buf, ":\nlis r12,hi16(");
33335 strcat (tmp_buf, name_buf);
33336 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33337 strcat (tmp_buf, name_buf);
33338 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33340 output_asm_insn (tmp_buf, 0);
33341 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33342 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33343 dbxout_stabd (N_SLINE, bi->line_number);
33344 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33345 branch_islands->pop ();
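/* Illustrative aside, not part of the original source: for a non-PIC far
   call to _foo recorded with label L42, the strings assembled into
   TMP_BUF above emit an island like

	L42:
	    lis r12,hi16(_foo)
	    ori r12,r12,lo16(_foo)
	    mtctr r12
	    bctr
*/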
33349 /* NO_PREVIOUS_DEF checks whether FUNCTION_NAME already has a branch
33350 island recorded in the BRANCH_ISLANDS vector. */
33352 static int
33353 no_previous_def (tree function_name)
33355 branch_island *bi;
33356 unsigned ix;
33358 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33359 if (function_name == bi->function_name)
33360 return 0;
33361 return 1;
33364 /* GET_PREV_LABEL gets the label name from the previous definition of
33365 the function. */
33367 static tree
33368 get_prev_label (tree function_name)
33370 branch_island *bi;
33371 unsigned ix;
33373 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33374 if (function_name == bi->function_name)
33375 return bi->label_name;
33376 return NULL_TREE;
33379 /* INSN is either a function call or a millicode call. It may have an
33380 unconditional jump in its delay slot.
33382 CALL_DEST is the routine we are calling. */
33384 char *
33385 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
33386 int cookie_operand_number)
33388 static char buf[256];
33389 if (darwin_emit_branch_islands
33390 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
33391 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
33393 tree labelname;
33394 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
33396 if (no_previous_def (funname))
33398 rtx label_rtx = gen_label_rtx ();
33399 char *label_buf, temp_buf[256];
33400 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
33401 CODE_LABEL_NUMBER (label_rtx));
33402 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
33403 labelname = get_identifier (label_buf);
33404 add_compiler_branch_island (labelname, funname, insn_line (insn));
33406 else
33407 labelname = get_prev_label (funname);
33409 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
33410 instruction will reach 'foo', otherwise link as 'bl L42'".
33411 "L42" should be a 'branch island', that will do a far jump to
33412 'foo'. Branch islands are generated in
33413 macho_branch_islands(). */
33414 sprintf (buf, "jbsr %%z%d,%.246s",
33415 dest_operand_number, IDENTIFIER_POINTER (labelname));
33417 else
33418 sprintf (buf, "bl %%z%d", dest_operand_number);
33419 return buf;
33422 /* Generate PIC and indirect symbol stubs. */
33424 void
33425 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33427 unsigned int length;
33428 char *symbol_name, *lazy_ptr_name;
33429 char *local_label_0;
33430 static int label = 0;
33432 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33433 symb = (*targetm.strip_name_encoding) (symb);
33436 length = strlen (symb);
33437 symbol_name = XALLOCAVEC (char, length + 32);
33438 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33440 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33441 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33443 if (flag_pic == 2)
33444 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33445 else
33446 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33448 if (flag_pic == 2)
33450 fprintf (file, "\t.align 5\n");
33452 fprintf (file, "%s:\n", stub);
33453 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33455 label++;
33456 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33457 sprintf (local_label_0, "\"L%011d$spb\"", label);
33459 fprintf (file, "\tmflr r0\n");
33460 if (TARGET_LINK_STACK)
33462 char name[32];
33463 get_ppc476_thunk_name (name);
33464 fprintf (file, "\tbl %s\n", name);
33465 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33467 else
33469 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33470 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33472 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33473 lazy_ptr_name, local_label_0);
33474 fprintf (file, "\tmtlr r0\n");
33475 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33476 (TARGET_64BIT ? "ldu" : "lwzu"),
33477 lazy_ptr_name, local_label_0);
33478 fprintf (file, "\tmtctr r12\n");
33479 fprintf (file, "\tbctr\n");
33481 else
33483 fprintf (file, "\t.align 4\n");
33485 fprintf (file, "%s:\n", stub);
33486 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33488 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33489 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33490 (TARGET_64BIT ? "ldu" : "lwzu"),
33491 lazy_ptr_name);
33492 fprintf (file, "\tmtctr r12\n");
33493 fprintf (file, "\tbctr\n");
33496 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33497 fprintf (file, "%s:\n", lazy_ptr_name);
33498 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33499 fprintf (file, "%sdyld_stub_binding_helper\n",
33500 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33503 /* Legitimize PIC addresses. If the address is already
33504 position-independent, we return ORIG. Newly generated
33505 position-independent addresses go into a reg. This is REG if
33506 nonzero; otherwise we allocate register(s) as necessary. */
33508 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
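/* Comment added for illustration: SMALL_INT is true when X fits a signed
   16-bit immediate.  Adding 0x8000 maps [-0x8000, 0x7fff] onto
   [0, 0xffff], so 32767 and -32768 pass while 32768 does not.  */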
33511 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33512 rtx reg)
33514 rtx base, offset;
33516 if (reg == NULL && !reload_completed)
33517 reg = gen_reg_rtx (Pmode);
33519 if (GET_CODE (orig) == CONST)
33521 rtx reg_temp;
33523 if (GET_CODE (XEXP (orig, 0)) == PLUS
33524 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33525 return orig;
33527 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33529 /* Use a different reg for the intermediate value, as
33530 it will be marked UNCHANGING. */
33531 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33532 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33533 Pmode, reg_temp);
33534 offset =
33535 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33536 Pmode, reg);
33538 if (GET_CODE (offset) == CONST_INT)
33540 if (SMALL_INT (offset))
33541 return plus_constant (Pmode, base, INTVAL (offset));
33542 else if (!reload_completed)
33543 offset = force_reg (Pmode, offset);
33544 else
33546 rtx mem = force_const_mem (Pmode, orig);
33547 return machopic_legitimize_pic_address (mem, Pmode, reg);
33550 return gen_rtx_PLUS (Pmode, base, offset);
33553 /* Fall back on generic machopic code. */
33554 return machopic_legitimize_pic_address (orig, mode, reg);
33557 /* Output a .machine directive for the Darwin assembler, and call
33558 the generic start_file routine. */
33560 static void
33561 rs6000_darwin_file_start (void)
33563 static const struct
33565 const char *arg;
33566 const char *name;
33567 HOST_WIDE_INT if_set;
33568 } mapping[] = {
33569 { "ppc64", "ppc64", MASK_64BIT },
33570 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33571 { "power4", "ppc970", 0 },
33572 { "G5", "ppc970", 0 },
33573 { "7450", "ppc7450", 0 },
33574 { "7400", "ppc7400", MASK_ALTIVEC },
33575 { "G4", "ppc7400", 0 },
33576 { "750", "ppc750", 0 },
33577 { "740", "ppc750", 0 },
33578 { "G3", "ppc750", 0 },
33579 { "604e", "ppc604e", 0 },
33580 { "604", "ppc604", 0 },
33581 { "603e", "ppc603", 0 },
33582 { "603", "ppc603", 0 },
33583 { "601", "ppc601", 0 },
33584 { NULL, "ppc", 0 } };
33585 const char *cpu_id = "";
33586 size_t i;
33588 rs6000_file_start ();
33589 darwin_file_start ();
33591 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33593 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33594 cpu_id = rs6000_default_cpu;
33596 if (global_options_set.x_rs6000_cpu_index)
33597 cpu_id = processor_target_table[rs6000_cpu_index].name;
33599 /* Look through the mapping array. Pick the first name that either
33600 matches the argument, has a bit set in IF_SET that is also set
33601 in the target flags, or has a NULL name. */
33603 i = 0;
33604 while (mapping[i].arg != NULL
33605 && strcmp (mapping[i].arg, cpu_id) != 0
33606 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33607 i++;
33609 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
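/* Illustrative aside, not part of the original source: -mcpu=G5 matches
   the "G5" row above and emits "\t.machine ppc970"; with no -mcpu and
   none of the IF_SET flags active, the NULL sentinel gives plain
   "\t.machine ppc".  */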
33612 #endif /* TARGET_MACHO */
33614 #if TARGET_ELF
33615 static int
33616 rs6000_elf_reloc_rw_mask (void)
33618 if (flag_pic)
33619 return 3;
33620 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33621 return 2;
33622 else
33623 return 0;
33626 /* Record an element in the table of global constructors. SYMBOL is
33627 a SYMBOL_REF of the function to be called; PRIORITY is a number
33628 between 0 and MAX_INIT_PRIORITY.
33630 This differs from default_named_section_asm_out_constructor in
33631 that we have special handling for -mrelocatable. */
33633 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33634 static void
33635 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33637 const char *section = ".ctors";
33638 char buf[18];
33640 if (priority != DEFAULT_INIT_PRIORITY)
33642 sprintf (buf, ".ctors.%.5u",
33643 /* Invert the numbering so the linker puts us in the proper
33644 order; constructors are run from right to left, and the
33645 linker sorts in increasing order. */
33646 MAX_INIT_PRIORITY - priority);
33647 section = buf;
33650 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33651 assemble_align (POINTER_SIZE);
33653 if (DEFAULT_ABI == ABI_V4
33654 && (TARGET_RELOCATABLE || flag_pic > 1))
33656 fputs ("\t.long (", asm_out_file);
33657 output_addr_const (asm_out_file, symbol);
33658 fputs (")@fixup\n", asm_out_file);
33660 else
33661 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33664 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33665 static void
33666 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33668 const char *section = ".dtors";
33669 char buf[18];
33671 if (priority != DEFAULT_INIT_PRIORITY)
33673 sprintf (buf, ".dtors.%.5u",
33674 /* Invert the numbering so the linker puts us in the proper
33675 order; constructors are run from right to left, and the
33676 linker sorts in increasing order. */
33677 MAX_INIT_PRIORITY - priority);
33678 section = buf;
33681 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33682 assemble_align (POINTER_SIZE);
33684 if (DEFAULT_ABI == ABI_V4
33685 && (TARGET_RELOCATABLE || flag_pic > 1))
33687 fputs ("\t.long (", asm_out_file);
33688 output_addr_const (asm_out_file, symbol);
33689 fputs (")@fixup\n", asm_out_file);
33691 else
33692 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
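/* Illustrative aside, not part of the original source: a hypothetical

	void early_init (void) __attribute__ ((constructor (101)));

   is recorded in section ".ctors.65434" (MAX_INIT_PRIORITY 65535 minus
   101), so after the linker's ascending sort and the runtime's
   right-to-left traversal it still runs before higher-numbered
   priorities.  */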
33695 void
33696 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33698 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33700 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33701 ASM_OUTPUT_LABEL (file, name);
33702 fputs (DOUBLE_INT_ASM_OP, file);
33703 rs6000_output_function_entry (file, name);
33704 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33705 if (DOT_SYMBOLS)
33707 fputs ("\t.size\t", file);
33708 assemble_name (file, name);
33709 fputs (",24\n\t.type\t.", file);
33710 assemble_name (file, name);
33711 fputs (",@function\n", file);
33712 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33714 fputs ("\t.globl\t.", file);
33715 assemble_name (file, name);
33716 putc ('\n', file);
33719 else
33720 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33721 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33722 rs6000_output_function_entry (file, name);
33723 fputs (":\n", file);
33724 return;
33727 int uses_toc;
33728 if (DEFAULT_ABI == ABI_V4
33729 && (TARGET_RELOCATABLE || flag_pic > 1)
33730 && !TARGET_SECURE_PLT
33731 && (!constant_pool_empty_p () || crtl->profile)
33732 && (uses_toc = uses_TOC ()))
33734 char buf[256];
33736 if (uses_toc == 2)
33737 switch_to_other_text_partition ();
33738 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33740 fprintf (file, "\t.long ");
33741 assemble_name (file, toc_label_name);
33742 need_toc_init = 1;
33743 putc ('-', file);
33744 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33745 assemble_name (file, buf);
33746 putc ('\n', file);
33747 if (uses_toc == 2)
33748 switch_to_other_text_partition ();
33751 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33752 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33754 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33756 char buf[256];
33758 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33760 fprintf (file, "\t.quad .TOC.-");
33761 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33762 assemble_name (file, buf);
33763 putc ('\n', file);
33766 if (DEFAULT_ABI == ABI_AIX)
33768 const char *desc_name, *orig_name;
33770 orig_name = (*targetm.strip_name_encoding) (name);
33771 desc_name = orig_name;
33772 while (*desc_name == '.')
33773 desc_name++;
33775 if (TREE_PUBLIC (decl))
33776 fprintf (file, "\t.globl %s\n", desc_name);
33778 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33779 fprintf (file, "%s:\n", desc_name);
33780 fprintf (file, "\t.long %s\n", orig_name);
33781 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33782 fputs ("\t.long 0\n", file);
33783 fprintf (file, "\t.previous\n");
33785 ASM_OUTPUT_LABEL (file, name);
33788 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33789 static void
33790 rs6000_elf_file_end (void)
33792 #ifdef HAVE_AS_GNU_ATTRIBUTE
33793 /* ??? The value emitted depends on options active at file end.
33794 Assume anyone using #pragma or attributes that might change
33795 options knows what they are doing. */
33796 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33797 && rs6000_passes_float)
33799 int fp;
33801 if (TARGET_DF_FPR)
33802 fp = 1;
33803 else if (TARGET_SF_FPR)
33804 fp = 3;
33805 else
33806 fp = 2;
33807 if (rs6000_passes_long_double)
33809 if (!TARGET_LONG_DOUBLE_128)
33810 fp |= 2 * 4;
33811 else if (TARGET_IEEEQUAD)
33812 fp |= 3 * 4;
33813 else
33814 fp |= 1 * 4;
33816 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33818 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33820 if (rs6000_passes_vector)
33821 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33822 (TARGET_ALTIVEC_ABI ? 2 : 1));
33823 if (rs6000_returns_struct)
33824 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33825 aix_struct_return ? 2 : 1);
33827 #endif
33828 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33829 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33830 file_end_indicate_exec_stack ();
33831 #endif
33833 if (flag_split_stack)
33834 file_end_indicate_split_stack ();
33836 if (cpu_builtin_p)
33838 /* We have expanded a CPU builtin, so we need to emit a reference to
33839 the special symbol that LIBC uses to declare it supports the
33840 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33841 switch_to_section (data_section);
33842 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33843 fprintf (asm_out_file, "\t%s %s\n",
33844 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
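/* Illustrative aside, not part of the original source: on a 64-bit ELF
   target that passes float and long double values (and with assembler
   .gnu_attribute support), TARGET_DF_FPR gives fp = 1 and an IBM-format
   128-bit long double adds 1 * 4, so the file ends with
   "\t.gnu_attribute 4, 5".  */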
33847 #endif
33849 #if TARGET_XCOFF
33851 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33852 #define HAVE_XCOFF_DWARF_EXTRAS 0
33853 #endif
33855 static enum unwind_info_type
33856 rs6000_xcoff_debug_unwind_info (void)
33858 return UI_NONE;
33861 static void
33862 rs6000_xcoff_asm_output_anchor (rtx symbol)
33864 char buffer[100];
33866 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33867 SYMBOL_REF_BLOCK_OFFSET (symbol));
33868 fprintf (asm_out_file, "%s", SET_ASM_OP);
33869 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33870 fprintf (asm_out_file, ",");
33871 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33872 fprintf (asm_out_file, "\n");
33875 static void
33876 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33878 fputs (GLOBAL_ASM_OP, stream);
33879 RS6000_OUTPUT_BASENAME (stream, name);
33880 putc ('\n', stream);
33883 /* A get_unnamed_decl callback, used for read-only sections. PTR
33884 points to the section string variable. */
33886 static void
33887 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33889 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33890 *(const char *const *) directive,
33891 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33894 /* Likewise for read-write sections. */
33896 static void
33897 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33899 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33900 *(const char *const *) directive,
33901 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33904 static void
33905 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33907 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33908 *(const char *const *) directive,
33909 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33912 /* A get_unnamed_section callback, used for switching to toc_section. */
33914 static void
33915 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33917 if (TARGET_MINIMAL_TOC)
33919 /* toc_section is always selected at least once from
33920 rs6000_xcoff_file_start, so this is guaranteed to be
33921 defined exactly once in each file. */
33922 if (!toc_initialized)
33924 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33925 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33926 toc_initialized = 1;
33928 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33929 (TARGET_32BIT ? "" : ",3"));
33931 else
33932 fputs ("\t.toc\n", asm_out_file);
33935 /* Implement TARGET_ASM_INIT_SECTIONS. */
33937 static void
33938 rs6000_xcoff_asm_init_sections (void)
33940 read_only_data_section
33941 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33942 &xcoff_read_only_section_name);
33944 private_data_section
33945 = get_unnamed_section (SECTION_WRITE,
33946 rs6000_xcoff_output_readwrite_section_asm_op,
33947 &xcoff_private_data_section_name);
33949 tls_data_section
33950 = get_unnamed_section (SECTION_TLS,
33951 rs6000_xcoff_output_tls_section_asm_op,
33952 &xcoff_tls_data_section_name);
33954 tls_private_data_section
33955 = get_unnamed_section (SECTION_TLS,
33956 rs6000_xcoff_output_tls_section_asm_op,
33957 &xcoff_private_data_section_name);
33959 read_only_private_data_section
33960 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33961 &xcoff_private_data_section_name);
33963 toc_section
33964 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33966 readonly_data_section = read_only_data_section;
33969 static int
33970 rs6000_xcoff_reloc_rw_mask (void)
33972 return 3;
33975 static void
33976 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33977 tree decl ATTRIBUTE_UNUSED)
33979 int smclass;
33980 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33982 if (flags & SECTION_EXCLUDE)
33983 smclass = 4;
33984 else if (flags & SECTION_DEBUG)
33986 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33987 return;
33989 else if (flags & SECTION_CODE)
33990 smclass = 0;
33991 else if (flags & SECTION_TLS)
33992 smclass = 3;
33993 else if (flags & SECTION_WRITE)
33994 smclass = 2;
33995 else
33996 smclass = 1;
33998 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33999 (flags & SECTION_CODE) ? "." : "",
34000 name, suffix[smclass], flags & SECTION_ENTSIZE);
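/* Illustrative aside, not part of the original source: a writable section
   requested with __attribute__ ((section ("mydata"))) has SECTION_WRITE
   set, so smclass selects "RW" and the directive above becomes
   ".csect mydata[RW],<align>", where <align> is the log2 alignment
   encoded in the SECTION_ENTSIZE bits (see
   rs6000_xcoff_section_type_flags below).  */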
34003 #define IN_NAMED_SECTION(DECL) \
34004 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
34005 && DECL_SECTION_NAME (DECL) != NULL)
34007 static section *
34008 rs6000_xcoff_select_section (tree decl, int reloc,
34009 unsigned HOST_WIDE_INT align)
34011 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
34012 named section. */
34013 if (align > BIGGEST_ALIGNMENT)
34015 resolve_unique_section (decl, reloc, true);
34016 if (IN_NAMED_SECTION (decl))
34017 return get_named_section (decl, NULL, reloc);
34020 if (decl_readonly_section (decl, reloc))
34022 if (TREE_PUBLIC (decl))
34023 return read_only_data_section;
34024 else
34025 return read_only_private_data_section;
34027 else
34029 #if HAVE_AS_TLS
34030 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34032 if (TREE_PUBLIC (decl))
34033 return tls_data_section;
34034 else if (bss_initializer_p (decl))
34036 /* Convert to COMMON to emit in BSS. */
34037 DECL_COMMON (decl) = 1;
34038 return tls_comm_section;
34040 else
34041 return tls_private_data_section;
34043 else
34044 #endif
34045 if (TREE_PUBLIC (decl))
34046 return data_section;
34047 else
34048 return private_data_section;
34052 static void
34053 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
34055 const char *name;
34057 /* Use select_section for private data and uninitialized data with
34058 alignment <= BIGGEST_ALIGNMENT. */
34059 if (!TREE_PUBLIC (decl)
34060 || DECL_COMMON (decl)
34061 || (DECL_INITIAL (decl) == NULL_TREE
34062 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
34063 || DECL_INITIAL (decl) == error_mark_node
34064 || (flag_zero_initialized_in_bss
34065 && initializer_zerop (DECL_INITIAL (decl))))
34066 return;
34068 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
34069 name = (*targetm.strip_name_encoding) (name);
34070 set_decl_section_name (decl, name);
34073 /* Select section for constant in constant pool.
34075 On RS/6000, all constants are in the private read-only data area.
34076 However, if this is being placed in the TOC it must be output as a
34077 toc entry. */
34079 static section *
34080 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
34081 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
34083 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
34084 return toc_section;
34085 else
34086 return read_only_private_data_section;
34089 /* Remove any trailing [DS] or the like from the symbol name. */
34091 static const char *
34092 rs6000_xcoff_strip_name_encoding (const char *name)
34094 size_t len;
34095 if (*name == '*')
34096 name++;
34097 len = strlen (name);
34098 if (name[len - 1] == ']')
34099 return ggc_alloc_string (name, len - 4);
34100 else
34101 return name;
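/* Illustrative aside, not part of the original source: the mapping class
   is assumed to be two characters wide, so "foo[DS]" strips to "foo" and
   "*bar[RW]" to "bar" (LEN - 4 drops the bracketed suffix after any
   leading '*' is skipped).  */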
34104 /* Section attributes. AIX is always PIC. */
34106 static unsigned int
34107 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
34109 unsigned int align;
34110 unsigned int flags = default_section_type_flags (decl, name, reloc);
34112 /* Align to at least UNIT size. */
34113 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
34114 align = MIN_UNITS_PER_WORD;
34115 else
34116 /* Increase alignment of large objects if not already stricter. */
34117 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
34118 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
34119 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
34121 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
34124 /* Output at beginning of assembler file.
34126 Initialize the section names for the RS/6000 at this point.
34128 Specify filename, including full path, to assembler.
34130 We want to go into the TOC section so at least one .toc will be emitted.
34131 Also, in order to output proper .bs/.es pairs, we need at least one static
34132 [RW] section emitted.
34134 Finally, declare mcount when profiling to make the assembler happy. */
34136 static void
34137 rs6000_xcoff_file_start (void)
34139 rs6000_gen_section_name (&xcoff_bss_section_name,
34140 main_input_filename, ".bss_");
34141 rs6000_gen_section_name (&xcoff_private_data_section_name,
34142 main_input_filename, ".rw_");
34143 rs6000_gen_section_name (&xcoff_read_only_section_name,
34144 main_input_filename, ".ro_");
34145 rs6000_gen_section_name (&xcoff_tls_data_section_name,
34146 main_input_filename, ".tls_");
34147 rs6000_gen_section_name (&xcoff_tbss_section_name,
34148 main_input_filename, ".tbss_[UL]");
34150 fputs ("\t.file\t", asm_out_file);
34151 output_quoted_string (asm_out_file, main_input_filename);
34152 fputc ('\n', asm_out_file);
34153 if (write_symbols != NO_DEBUG)
34154 switch_to_section (private_data_section);
34155 switch_to_section (toc_section);
34156 switch_to_section (text_section);
34157 if (profile_flag)
34158 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
34159 rs6000_file_start ();
34162 /* Output at end of assembler file.
34163 On the RS/6000, referencing data should automatically pull in text. */
34165 static void
34166 rs6000_xcoff_file_end (void)
34168 switch_to_section (text_section);
34169 fputs ("_section_.text:\n", asm_out_file);
34170 switch_to_section (data_section);
34171 fputs (TARGET_32BIT
34172 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
34173 asm_out_file);
34176 struct declare_alias_data
34178 FILE *file;
34179 bool function_descriptor;
34182 /* Declare alias N. A helper for symtab_node::call_for_symbol_and_aliases. */
34184 static bool
34185 rs6000_declare_alias (struct symtab_node *n, void *d)
34187 struct declare_alias_data *data = (struct declare_alias_data *)d;
34188 /* The main symbol is output specially, because the varasm machinery does
34189 part of the job for us - we do not need to emit .globl/.lglobl and such. */
34190 if (!n->alias || n->weakref)
34191 return false;
34193 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
34194 return false;
34196 /* Prevent assemble_alias from trying to use .set pseudo operation
34197 that does not behave as expected by the middle-end. */
34198 TREE_ASM_WRITTEN (n->decl) = true;
34200 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
34201 char *buffer = (char *) alloca (strlen (name) + 2);
34202 char *p;
34203 int dollar_inside = 0;
34205 strcpy (buffer, name);
34206 p = strchr (buffer, '$');
34207 while (p) {
34208 *p = '_';
34209 dollar_inside++;
34210 p = strchr (p + 1, '$');
34212 if (TREE_PUBLIC (n->decl))
34214 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
34216 if (dollar_inside) {
34217 if (data->function_descriptor)
34218 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34219 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34221 if (data->function_descriptor)
34223 fputs ("\t.globl .", data->file);
34224 RS6000_OUTPUT_BASENAME (data->file, buffer);
34225 putc ('\n', data->file);
34227 fputs ("\t.globl ", data->file);
34228 RS6000_OUTPUT_BASENAME (data->file, buffer);
34229 putc ('\n', data->file);
34231 #ifdef ASM_WEAKEN_DECL
34232 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
34233 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
34234 #endif
34236 else
34238 if (dollar_inside)
34240 if (data->function_descriptor)
34241 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34242 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34244 if (data->function_descriptor)
34246 fputs ("\t.lglobl .", data->file);
34247 RS6000_OUTPUT_BASENAME (data->file, buffer);
34248 putc ('\n', data->file);
34250 fputs ("\t.lglobl ", data->file);
34251 RS6000_OUTPUT_BASENAME (data->file, buffer);
34252 putc ('\n', data->file);
34254 if (data->function_descriptor)
34255 fputs (".", data->file);
34256 RS6000_OUTPUT_BASENAME (data->file, buffer);
34257 fputs (":\n", data->file);
34258 return false;
34262 #ifdef HAVE_GAS_HIDDEN
34263 /* Helper function to calculate visibility of a DECL
34264 and return the value as a const string. */
34266 static const char *
34267 rs6000_xcoff_visibility (tree decl)
34269 static const char * const visibility_types[] = {
34270 "", ",protected", ",hidden", ",internal"
34273 enum symbol_visibility vis = DECL_VISIBILITY (decl);
34275 if (TREE_CODE (decl) == FUNCTION_DECL
34276 && cgraph_node::get (decl)
34277 && cgraph_node::get (decl)->instrumentation_clone
34278 && cgraph_node::get (decl)->instrumented_version)
34279 vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);
34281 return visibility_types[vis];
34283 #endif
34286 /* This macro produces the initial definition of a function name.
34287 On the RS/6000, we need to place an extra '.' in the function name and
34288 output the function descriptor.
34289 Dollar signs are converted to underscores.
34291 The csect for the function will have already been created when
34292 text_section was selected. We do have to go back to that csect, however.
34294 The third and fourth parameters to the .function pseudo-op (16 and 044)
34295 are placeholders which no longer have any use.
34297 Because AIX assembler's .set command has unexpected semantics, we output
34298 all aliases as alternative labels in front of the definition. */
34300 void
34301 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
34303 char *buffer = (char *) alloca (strlen (name) + 1);
34304 char *p;
34305 int dollar_inside = 0;
34306 struct declare_alias_data data = {file, false};
34308 strcpy (buffer, name);
34309 p = strchr (buffer, '$');
34310 while (p) {
34311 *p = '_';
34312 dollar_inside++;
34313 p = strchr (p + 1, '$');
34315 if (TREE_PUBLIC (decl))
34317 if (!RS6000_WEAK || !DECL_WEAK (decl))
34319 if (dollar_inside) {
34320 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34321 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34323 fputs ("\t.globl .", file);
34324 RS6000_OUTPUT_BASENAME (file, buffer);
34325 #ifdef HAVE_GAS_HIDDEN
34326 fputs (rs6000_xcoff_visibility (decl), file);
34327 #endif
34328 putc ('\n', file);
34331 else
34333 if (dollar_inside) {
34334 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34335 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34337 fputs ("\t.lglobl .", file);
34338 RS6000_OUTPUT_BASENAME (file, buffer);
34339 putc ('\n', file);
34341 fputs ("\t.csect ", file);
34342 RS6000_OUTPUT_BASENAME (file, buffer);
34343 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34344 RS6000_OUTPUT_BASENAME (file, buffer);
34345 fputs (":\n", file);
34346 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34347 &data, true);
34348 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34349 RS6000_OUTPUT_BASENAME (file, buffer);
34350 fputs (", TOC[tc0], 0\n", file);
34351 in_section = NULL;
34352 switch_to_section (function_section (decl));
34353 putc ('.', file);
34354 RS6000_OUTPUT_BASENAME (file, buffer);
34355 fputs (":\n", file);
34356 data.function_descriptor = true;
34357 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34358 &data, true);
34359 if (!DECL_IGNORED_P (decl))
34361 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34362 xcoffout_declare_function (file, decl, buffer);
34363 else if (write_symbols == DWARF2_DEBUG)
34365 name = (*targetm.strip_name_encoding) (name);
34366 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34369 return;
34373 /* Output assembly language to globalize a symbol from a DECL,
34374 possibly with visibility. */
34376 void
34377 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34379 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34380 fputs (GLOBAL_ASM_OP, stream);
34381 RS6000_OUTPUT_BASENAME (stream, name);
34382 #ifdef HAVE_GAS_HIDDEN
34383 fputs (rs6000_xcoff_visibility (decl), stream);
34384 #endif
34385 putc ('\n', stream);
34388 /* Output assembly language to define a symbol as COMMON from a DECL,
34389 possibly with visibility. */
34391 void
34392 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34393 tree decl ATTRIBUTE_UNUSED,
34394 const char *name,
34395 unsigned HOST_WIDE_INT size,
34396 unsigned HOST_WIDE_INT align)
34398 unsigned HOST_WIDE_INT align2 = 2;
34400 if (align > 32)
34401 align2 = floor_log2 (align / BITS_PER_UNIT);
34402 else if (size > 4)
34403 align2 = 3;
34405 fputs (COMMON_ASM_OP, stream);
34406 RS6000_OUTPUT_BASENAME (stream, name);
34408 fprintf (stream,
34409 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34410 size, align2);
34412 #ifdef HAVE_GAS_HIDDEN
34413 if (decl != NULL)
34414 fputs (rs6000_xcoff_visibility (decl), stream);
34415 #endif
34416 putc ('\n', stream);
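/* Illustrative aside, not part of the original source: a common
   'double d;' has ALIGN 64 > 32, so align2 = floor_log2 (64 / 8) = 3 and
   the directive is roughly ".comm d,8,3"; a common 'int i;' keeps the
   default align2 = 2, giving ".comm i,4,2".  */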
34419 /* This macro produces the initial definition of an object (variable) name.
34420 Because AIX assembler's .set command has unexpected semantics, we output
34421 all aliases as alternative labels in front of the definition. */
34423 void
34424 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34426 struct declare_alias_data data = {file, false};
34427 RS6000_OUTPUT_BASENAME (file, name);
34428 fputs (":\n", file);
34429 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34430 &data, true);
34433 /* Override the default 'SYMBOL-.' syntax with AIX compatible 'SYMBOL-$'. */
34435 void
34436 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34438 fputs (integer_asm_op (size, FALSE), file);
34439 assemble_name (file, label);
34440 fputs ("-$", file);
34443 /* Output a symbol offset relative to the dbase for the current object.
34444 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34445 signed offsets.
34447 __gcc_unwind_dbase is embedded in all executables/libraries through
34448 libgcc/config/rs6000/crtdbase.S. */
34450 void
34451 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34453 fputs (integer_asm_op (size, FALSE), file);
34454 assemble_name (file, label);
34455 fputs("-__gcc_unwind_dbase", file);
34458 #ifdef HAVE_AS_TLS
34459 static void
34460 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34462 rtx symbol;
34463 int flags;
34464 const char *symname;
34466 default_encode_section_info (decl, rtl, first);
34468 /* Careful not to prod global register variables. */
34469 if (!MEM_P (rtl))
34470 return;
34471 symbol = XEXP (rtl, 0);
34472 if (GET_CODE (symbol) != SYMBOL_REF)
34473 return;
34475 flags = SYMBOL_REF_FLAGS (symbol);
34477 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34478 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34480 SYMBOL_REF_FLAGS (symbol) = flags;
34482 /* Append mapping class to extern decls. */
34483 symname = XSTR (symbol, 0);
34484 if (decl /* sync condition with assemble_external () */
34485 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34486 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34487 || TREE_CODE (decl) == FUNCTION_DECL)
34488 && symname[strlen (symname) - 1] != ']')
34490 char *newname = (char *) alloca (strlen (symname) + 5);
34491 strcpy (newname, symname);
34492 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34493 ? "[DS]" : "[UA]"));
34494 XSTR (symbol, 0) = ggc_strdup (newname);
34497 #endif /* HAVE_AS_TLS */
34498 #endif /* TARGET_XCOFF */
34500 void
34501 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34502 const char *name, const char *val)
34504 fputs ("\t.weak\t", stream);
34505 RS6000_OUTPUT_BASENAME (stream, name);
34506 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34507 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34509 if (TARGET_XCOFF)
34510 fputs ("[DS]", stream);
34511 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34512 if (TARGET_XCOFF)
34513 fputs (rs6000_xcoff_visibility (decl), stream);
34514 #endif
34515 fputs ("\n\t.weak\t.", stream);
34516 RS6000_OUTPUT_BASENAME (stream, name);
34518 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34519 if (TARGET_XCOFF)
34520 fputs (rs6000_xcoff_visibility (decl), stream);
34521 #endif
34522 fputc ('\n', stream);
34523 if (val)
34525 #ifdef ASM_OUTPUT_DEF
34526 ASM_OUTPUT_DEF (stream, name, val);
34527 #endif
34528 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34529 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34531 fputs ("\t.set\t.", stream);
34532 RS6000_OUTPUT_BASENAME (stream, name);
34533 fputs (",.", stream);
34534 RS6000_OUTPUT_BASENAME (stream, val);
34535 fputc ('\n', stream);
34541 /* Return true if INSN should not be copied. */
34543 static bool
34544 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34546 return recog_memoized (insn) >= 0
34547 && get_attr_cannot_copy (insn);
34550 /* Compute a (partial) cost for rtx X. Return true if the complete
34551 cost has been computed, and false if subexpressions should be
34552 scanned. In either case, *TOTAL contains the cost result. */
34554 static bool
34555 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34556 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34558 int code = GET_CODE (x);
34560 switch (code)
34562 /* On the RS/6000, if it is valid in the insn, it is free. */
34563 case CONST_INT:
34564 if (((outer_code == SET
34565 || outer_code == PLUS
34566 || outer_code == MINUS)
34567 && (satisfies_constraint_I (x)
34568 || satisfies_constraint_L (x)))
34569 || (outer_code == AND
34570 && (satisfies_constraint_K (x)
34571 || (mode == SImode
34572 ? satisfies_constraint_L (x)
34573 : satisfies_constraint_J (x))))
34574 || ((outer_code == IOR || outer_code == XOR)
34575 && (satisfies_constraint_K (x)
34576 || (mode == SImode
34577 ? satisfies_constraint_L (x)
34578 : satisfies_constraint_J (x))))
34579 || outer_code == ASHIFT
34580 || outer_code == ASHIFTRT
34581 || outer_code == LSHIFTRT
34582 || outer_code == ROTATE
34583 || outer_code == ROTATERT
34584 || outer_code == ZERO_EXTRACT
34585 || (outer_code == MULT
34586 && satisfies_constraint_I (x))
34587 || ((outer_code == DIV || outer_code == UDIV
34588 || outer_code == MOD || outer_code == UMOD)
34589 && exact_log2 (INTVAL (x)) >= 0)
34590 || (outer_code == COMPARE
34591 && (satisfies_constraint_I (x)
34592 || satisfies_constraint_K (x)))
34593 || ((outer_code == EQ || outer_code == NE)
34594 && (satisfies_constraint_I (x)
34595 || satisfies_constraint_K (x)
34596 || (mode == SImode
34597 ? satisfies_constraint_L (x)
34598 : satisfies_constraint_J (x))))
34599 || (outer_code == GTU
34600 && satisfies_constraint_I (x))
34601 || (outer_code == LTU
34602 && satisfies_constraint_P (x)))
34604 *total = 0;
34605 return true;
34607 else if ((outer_code == PLUS
34608 && reg_or_add_cint_operand (x, VOIDmode))
34609 || (outer_code == MINUS
34610 && reg_or_sub_cint_operand (x, VOIDmode))
34611 || ((outer_code == SET
34612 || outer_code == IOR
34613 || outer_code == XOR)
34614 && (INTVAL (x)
34615 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34617 *total = COSTS_N_INSNS (1);
34618 return true;
34620 /* FALLTHRU */
34622 case CONST_DOUBLE:
34623 case CONST_WIDE_INT:
34624 case CONST:
34625 case HIGH:
34626 case SYMBOL_REF:
34627 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34628 return true;
34630 case MEM:
34631 /* When optimizing for size, MEM should be slightly more expensive
34632 than generating address, e.g., (plus (reg) (const)).
34633 L1 cache latency is about two instructions. */
34634 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34635 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34636 *total += COSTS_N_INSNS (100);
34637 return true;
34639 case LABEL_REF:
34640 *total = 0;
34641 return true;
34643 case PLUS:
34644 case MINUS:
34645 if (FLOAT_MODE_P (mode))
34646 *total = rs6000_cost->fp;
34647 else
34648 *total = COSTS_N_INSNS (1);
34649 return false;
34651 case MULT:
34652 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34653 && satisfies_constraint_I (XEXP (x, 1)))
34655 if (INTVAL (XEXP (x, 1)) >= -256
34656 && INTVAL (XEXP (x, 1)) <= 255)
34657 *total = rs6000_cost->mulsi_const9;
34658 else
34659 *total = rs6000_cost->mulsi_const;
34661 else if (mode == SFmode)
34662 *total = rs6000_cost->fp;
34663 else if (FLOAT_MODE_P (mode))
34664 *total = rs6000_cost->dmul;
34665 else if (mode == DImode)
34666 *total = rs6000_cost->muldi;
34667 else
34668 *total = rs6000_cost->mulsi;
34669 return false;
34671 case FMA:
34672 if (mode == SFmode)
34673 *total = rs6000_cost->fp;
34674 else
34675 *total = rs6000_cost->dmul;
34676 break;
34678 case DIV:
34679 case MOD:
34680 if (FLOAT_MODE_P (mode))
34682 *total = mode == DFmode ? rs6000_cost->ddiv
34683 : rs6000_cost->sdiv;
34684 return false;
34686 /* FALLTHRU */
34688 case UDIV:
34689 case UMOD:
34690 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34691 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34693 if (code == DIV || code == MOD)
34694 /* Shift, addze */
34695 *total = COSTS_N_INSNS (2);
34696 else
34697 /* Shift */
34698 *total = COSTS_N_INSNS (1);
34700 else
34702 if (GET_MODE (XEXP (x, 1)) == DImode)
34703 *total = rs6000_cost->divdi;
34704 else
34705 *total = rs6000_cost->divsi;
34707 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34708 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34709 *total += COSTS_N_INSNS (2);
34710 return false;
34712 case CTZ:
34713 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34714 return false;
34716 case FFS:
34717 *total = COSTS_N_INSNS (4);
34718 return false;
34720 case POPCOUNT:
34721 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34722 return false;
34724 case PARITY:
34725 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34726 return false;
34728 case NOT:
34729 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34730 *total = 0;
34731 else
34732 *total = COSTS_N_INSNS (1);
34733 return false;
34735 case AND:
34736 if (CONST_INT_P (XEXP (x, 1)))
34738 rtx left = XEXP (x, 0);
34739 rtx_code left_code = GET_CODE (left);
34741 /* rotate-and-mask: 1 insn. */
34742 if ((left_code == ROTATE
34743 || left_code == ASHIFT
34744 || left_code == LSHIFTRT)
34745 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34747 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34748 if (!CONST_INT_P (XEXP (left, 1)))
34749 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34750 *total += COSTS_N_INSNS (1);
34751 return true;
34754 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34755 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34756 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34757 || (val & 0xffff) == val
34758 || (val & 0xffff0000) == val
34759 || ((val & 0xffff) == 0 && mode == SImode))
34761 *total = rtx_cost (left, mode, AND, 0, speed);
34762 *total += COSTS_N_INSNS (1);
34763 return true;
34766 /* 2 insns. */
34767 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34769 *total = rtx_cost (left, mode, AND, 0, speed);
34770 *total += COSTS_N_INSNS (2);
34771 return true;
34775 *total = COSTS_N_INSNS (1);
34776 return false;
34778 case IOR:
34779 /* FIXME */
34780 *total = COSTS_N_INSNS (1);
34781 return true;
34783 case CLZ:
34784 case XOR:
34785 case ZERO_EXTRACT:
34786 *total = COSTS_N_INSNS (1);
34787 return false;
34789 case ASHIFT:
34790 /* The EXTSWSLI instruction combines a sign extend and a shift; don't
34791 count the sign extend and the shift separately within the insn. */
34792 if (TARGET_EXTSWSLI && mode == DImode
34793 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34794 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34796 *total = 0;
34797 return false;
34799 /* fall through */
34801 case ASHIFTRT:
34802 case LSHIFTRT:
34803 case ROTATE:
34804 case ROTATERT:
34805 /* Handle mul_highpart. */
34806 if (outer_code == TRUNCATE
34807 && GET_CODE (XEXP (x, 0)) == MULT)
34809 if (mode == DImode)
34810 *total = rs6000_cost->muldi;
34811 else
34812 *total = rs6000_cost->mulsi;
34813 return true;
34815 else if (outer_code == AND)
34816 *total = 0;
34817 else
34818 *total = COSTS_N_INSNS (1);
34819 return false;
34821 case SIGN_EXTEND:
34822 case ZERO_EXTEND:
34823 if (GET_CODE (XEXP (x, 0)) == MEM)
34824 *total = 0;
34825 else
34826 *total = COSTS_N_INSNS (1);
34827 return false;
34829 case COMPARE:
34830 case NEG:
34831 case ABS:
34832 if (!FLOAT_MODE_P (mode))
34834 *total = COSTS_N_INSNS (1);
34835 return false;
34837 /* FALLTHRU */
34839 case FLOAT:
34840 case UNSIGNED_FLOAT:
34841 case FIX:
34842 case UNSIGNED_FIX:
34843 case FLOAT_TRUNCATE:
34844 *total = rs6000_cost->fp;
34845 return false;
34847 case FLOAT_EXTEND:
34848 if (mode == DFmode)
34849 *total = rs6000_cost->sfdf_convert;
34850 else
34851 *total = rs6000_cost->fp;
34852 return false;
34854 case UNSPEC:
34855 switch (XINT (x, 1))
34857 case UNSPEC_FRSP:
34858 *total = rs6000_cost->fp;
34859 return true;
34861 default:
34862 break;
34864 break;
34866 case CALL:
34867 case IF_THEN_ELSE:
34868 if (!speed)
34870 *total = COSTS_N_INSNS (1);
34871 return true;
34873 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34875 *total = rs6000_cost->fp;
34876 return false;
34878 break;
34880 case NE:
34881 case EQ:
34882 case GTU:
34883 case LTU:
34884 /* Carry bit requires mode == Pmode.
34885 NEG or PLUS already counted so only add one. */
34886 if (mode == Pmode
34887 && (outer_code == NEG || outer_code == PLUS))
34889 *total = COSTS_N_INSNS (1);
34890 return true;
34892 /* FALLTHRU */
34894 case GT:
34895 case LT:
34896 case UNORDERED:
34897 if (outer_code == SET)
34899 if (XEXP (x, 1) == const0_rtx)
34901 *total = COSTS_N_INSNS (2);
34902 return true;
34904 else
34906 *total = COSTS_N_INSNS (3);
34907 return false;
34910 /* CC COMPARE. */
34911 if (outer_code == COMPARE)
34913 *total = 0;
34914 return true;
34916 break;
34918 default:
34919 break;
34922 return false;
34925 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34927 static bool
34928 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34929 int opno, int *total, bool speed)
34931 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34933 fprintf (stderr,
34934 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34935 "opno = %d, total = %d, speed = %s, x:\n",
34936 ret ? "complete" : "scan inner",
34937 GET_MODE_NAME (mode),
34938 GET_RTX_NAME (outer_code),
34939 opno,
34940 *total,
34941 speed ? "true" : "false");
34943 debug_rtx (x);
34945 return ret;
34948 static int
34949 rs6000_insn_cost (rtx_insn *insn, bool speed)
34951 if (recog_memoized (insn) < 0)
34952 return 0;
34954 if (!speed)
34955 return get_attr_length (insn);
34957 int cost = get_attr_cost (insn);
34958 if (cost > 0)
34959 return cost;
34961 int n = get_attr_length (insn) / 4;
34962 enum attr_type type = get_attr_type (insn);
34964 switch (type)
34966 case TYPE_LOAD:
34967 case TYPE_FPLOAD:
34968 case TYPE_VECLOAD:
34969 cost = COSTS_N_INSNS (n + 1);
34970 break;
34972 case TYPE_MUL:
34973 switch (get_attr_size (insn))
34975 case SIZE_8:
34976 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34977 break;
34978 case SIZE_16:
34979 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34980 break;
34981 case SIZE_32:
34982 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34983 break;
34984 case SIZE_64:
34985 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34986 break;
34987 default:
34988 gcc_unreachable ();
34990 break;
34991 case TYPE_DIV:
34992 switch (get_attr_size (insn))
34994 case SIZE_32:
34995 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34996 break;
34997 case SIZE_64:
34998 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34999 break;
35000 default:
35001 gcc_unreachable ();
35003 break;
35005 case TYPE_FP:
35006 cost = n * rs6000_cost->fp;
35007 break;
35008 case TYPE_DMUL:
35009 cost = n * rs6000_cost->dmul;
35010 break;
35011 case TYPE_SDIV:
35012 cost = n * rs6000_cost->sdiv;
35013 break;
35014 case TYPE_DDIV:
35015 cost = n * rs6000_cost->ddiv;
35016 break;
35018 case TYPE_SYNC:
35019 case TYPE_LOAD_L:
35020 case TYPE_MFCR:
35021 case TYPE_MFCRF:
35022 cost = COSTS_N_INSNS (n + 2);
35023 break;
35025 default:
35026 cost = COSTS_N_INSNS (n);
35029 return cost;
35032 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
35034 static int
35035 rs6000_debug_address_cost (rtx x, machine_mode mode,
35036 addr_space_t as, bool speed)
35038 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
35040 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
35041 ret, speed ? "true" : "false");
35042 debug_rtx (x);
35044 return ret;
35048 /* A C expression returning the cost of moving data from a register of class
35049 CLASS1 to one of CLASS2. */
35051 static int
35052 rs6000_register_move_cost (machine_mode mode,
35053 reg_class_t from, reg_class_t to)
35055 int ret;
35057 if (TARGET_DEBUG_COST)
35058 dbg_cost_ctrl++;
35060 /* Moves from/to GENERAL_REGS. */
35061 if (reg_classes_intersect_p (to, GENERAL_REGS)
35062 || reg_classes_intersect_p (from, GENERAL_REGS))
35064 reg_class_t rclass = from;
35066 if (! reg_classes_intersect_p (to, GENERAL_REGS))
35067 rclass = to;
35069 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
35070 ret = (rs6000_memory_move_cost (mode, rclass, false)
35071 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
35073 /* It's more expensive to move CR_REGS than CR0_REGS because of the
35074 shift. */
35075 else if (rclass == CR_REGS)
35076 ret = 4;
35078 /* For those processors that have slow LR/CTR moves, make them more
35079 expensive than memory in order to bias spills to memory. */
35080 else if ((rs6000_cpu == PROCESSOR_POWER6
35081 || rs6000_cpu == PROCESSOR_POWER7
35082 || rs6000_cpu == PROCESSOR_POWER8
35083 || rs6000_cpu == PROCESSOR_POWER9)
35084 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
35085 ret = 6 * hard_regno_nregs (0, mode);
35087 else
35088 /* A move will cost one instruction per GPR moved. */
35089 ret = 2 * hard_regno_nregs (0, mode);
35092 /* If we have VSX, we can easily move between FPR or Altivec registers. */
35093 else if (VECTOR_MEM_VSX_P (mode)
35094 && reg_classes_intersect_p (to, VSX_REGS)
35095 && reg_classes_intersect_p (from, VSX_REGS))
35096 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
35098 /* Moving between two similar registers is just one instruction. */
35099 else if (reg_classes_intersect_p (to, from))
35100 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
35102 /* Everything else has to go through GENERAL_REGS. */
35103 else
35104 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
35105 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
35107 if (TARGET_DEBUG_COST)
35109 if (dbg_cost_ctrl == 1)
35110 fprintf (stderr,
35111 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
35112 ret, GET_MODE_NAME (mode), reg_class_names[from],
35113 reg_class_names[to]);
35114 dbg_cost_ctrl--;
35117 return ret;
35120 /* A C expression returning the cost of moving data of MODE from a register to
35121 or from memory. */
35123 static int
35124 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
35125 bool in ATTRIBUTE_UNUSED)
35127 int ret;
35129 if (TARGET_DEBUG_COST)
35130 dbg_cost_ctrl++;
35132 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
35133 ret = 4 * hard_regno_nregs (0, mode);
35134 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
35135 || reg_classes_intersect_p (rclass, VSX_REGS)))
35136 ret = 4 * hard_regno_nregs (32, mode);
35137 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
35138 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
35139 else
35140 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
35142 if (TARGET_DEBUG_COST)
35144 if (dbg_cost_ctrl == 1)
35145 fprintf (stderr,
35146 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
35147 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
35148 dbg_cost_ctrl--;
35151 return ret;
35154 /* Returns a code for a target-specific builtin that implements
35155 reciprocal of the function, or NULL_TREE if not available. */
35157 static tree
35158 rs6000_builtin_reciprocal (tree fndecl)
35160 switch (DECL_FUNCTION_CODE (fndecl))
35162 case VSX_BUILTIN_XVSQRTDP:
35163 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
35164 return NULL_TREE;
35166 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
35168 case VSX_BUILTIN_XVSQRTSP:
35169 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
35170 return NULL_TREE;
35172 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
35174 default:
35175 return NULL_TREE;
35179 /* Load up a constant. If the mode is a vector mode, splat the value across
35180 all of the vector elements. */
35182 static rtx
35183 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
35185 rtx reg;
35187 if (mode == SFmode || mode == DFmode)
35189 rtx d = const_double_from_real_value (dconst, mode);
35190 reg = force_reg (mode, d);
35192 else if (mode == V4SFmode)
35194 rtx d = const_double_from_real_value (dconst, SFmode);
35195 rtvec v = gen_rtvec (4, d, d, d, d);
35196 reg = gen_reg_rtx (mode);
35197 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35199 else if (mode == V2DFmode)
35201 rtx d = const_double_from_real_value (dconst, DFmode);
35202 rtvec v = gen_rtvec (2, d, d);
35203 reg = gen_reg_rtx (mode);
35204 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35206 else
35207 gcc_unreachable ();
35209 return reg;
35212 /* Generate an FMA instruction. */
35214 static void
35215 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
35217 machine_mode mode = GET_MODE (target);
35218 rtx dst;
35220 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
35221 gcc_assert (dst != NULL);
35223 if (dst != target)
35224 emit_move_insn (target, dst);
35227 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
35229 static void
35230 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
35232 machine_mode mode = GET_MODE (dst);
35233 rtx r;
35235 /* This is a tad more complicated, since the fnma_optab is for
35236 a different expression: fma(-m1, m2, a), which is the same
35237 thing except in the case of signed zeros.
35239 Fortunately we know that if FMA is supported that FNMSUB is
35240 also supported in the ISA. Just expand it directly. */
35242 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
35244 r = gen_rtx_NEG (mode, a);
35245 r = gen_rtx_FMA (mode, m1, m2, r);
35246 r = gen_rtx_NEG (mode, r);
35247 emit_insn (gen_rtx_SET (dst, r));
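/* Illustrative sketch, not part of the build: the identity relied on
   above, written with the C99 fma() from <math.h>.  -fma(m1, m2, -a)
   and fma(-m1, m2, a) compute the same value except possibly for the
   sign of a zero result, which is why FNMSUB is expanded directly
   rather than through fnma_optab.  */
#if 0
#include <math.h>
#include <stdio.h>

int
main (void)
{
  double m1 = 3.0, m2 = 4.0, a = 5.0;
  double nmsub = -fma (m1, m2, -a);   /* -(m1*m2 - a) */
  double fnma = fma (-m1, m2, a);     /* -m1*m2 + a */
  printf ("%g %g\n", nmsub, fnma);    /* both print -7 */
  return 0;
}
#endif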
35250 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
35251 add a reg_note saying that this was a division. Support both scalar and
35252 vector divide. Assumes no trapping math and finite arguments. */
35254 void
35255 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
35257 machine_mode mode = GET_MODE (dst);
35258 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
35259 int i;
35261 /* Low precision estimates guarantee 5 bits of accuracy. High
35262 precision estimates guarantee 14 bits of accuracy. SFmode
35263 requires 23 bits of accuracy. DFmode requires 52 bits of
35264 accuracy. Each pass at least doubles the accuracy, leading
35265 to the following. */
35266 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35267 if (mode == DFmode || mode == V2DFmode)
35268 passes++;
35270 enum insn_code code = optab_handler (smul_optab, mode);
35271 insn_gen_fn gen_mul = GEN_FCN (code);
35273 gcc_assert (code != CODE_FOR_nothing);
35275 one = rs6000_load_constant_and_splat (mode, dconst1);
35277 /* x0 = 1./d estimate */
35278 x0 = gen_reg_rtx (mode);
35279 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
35280 UNSPEC_FRES)));
35282 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
35283 if (passes > 1) {
35285 /* e0 = 1. - d * x0 */
35286 e0 = gen_reg_rtx (mode);
35287 rs6000_emit_nmsub (e0, d, x0, one);
35289 /* x1 = x0 + e0 * x0 */
35290 x1 = gen_reg_rtx (mode);
35291 rs6000_emit_madd (x1, e0, x0, x0);
35293 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
35294 ++i, xprev = xnext, eprev = enext) {
35296 /* enext = eprev * eprev */
35297 enext = gen_reg_rtx (mode);
35298 emit_insn (gen_mul (enext, eprev, eprev));
35300 /* xnext = xprev + enext * xprev */
35301 xnext = gen_reg_rtx (mode);
35302 rs6000_emit_madd (xnext, enext, xprev, xprev);
35305 } else
35306 xprev = x0;
35308 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
35310 /* u = n * xprev */
35311 u = gen_reg_rtx (mode);
35312 emit_insn (gen_mul (u, n, xprev));
35314 /* v = n - (d * u) */
35315 v = gen_reg_rtx (mode);
35316 rs6000_emit_nmsub (v, d, u, n);
35318 /* dst = (v * xprev) + u */
35319 rs6000_emit_madd (dst, v, xprev, u);
35321 if (note_p)
35322 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
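/* Hypothetical scalar model, not part of the build, of the expansion
   above.  Each refinement computes x_(i+1) = x_i * (2 - d * x_i); the
   RTL version saves a multiply per pass by squaring the previous error
   term instead of recomputing it, since (1 - d*x_(i+1)) = (1 - d*x_i)^2.
   swdiv_model and the 0.34 seed are illustrative stand-ins for the
   hardware reciprocal-estimate instruction (UNSPEC_FRES above).  */
#if 0
#include <stdio.h>

static double
swdiv_model (double n, double d, double x0, int passes)
{
  double x = x0;
  for (int i = 0; i < passes - 1; i++)
    {
      double e = 1.0 - d * x;   /* fnmsub */
      x = x + e * x;            /* fmadd: x * (2 - d*x) */
    }
  double u = n * x;             /* last pass folds in the numerator */
  double v = n - d * u;         /* fnmsub */
  return v * x + u;             /* fmadd */
}

int
main (void)
{
  /* A deliberately poor estimate of 1/3 still converges to n/d = 1/3.  */
  printf ("%.17g\n", swdiv_model (1.0, 3.0, 0.34, 3));
  return 0;
}
#endif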
35325 /* Goldschmidt's Algorithm for single/double-precision floating point
35326 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
35328 void
35329 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
35331 machine_mode mode = GET_MODE (src);
35332 rtx e = gen_reg_rtx (mode);
35333 rtx g = gen_reg_rtx (mode);
35334 rtx h = gen_reg_rtx (mode);
35336 /* Low precision estimates guarantee 5 bits of accuracy. High
35337 precision estimates guarantee 14 bits of accuracy. SFmode
35338 requires 23 bits of accuracy. DFmode requires 52 bits of
35339 accuracy. Each pass at least doubles the accuracy, leading
35340 to the following. */
35341 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35342 if (mode == DFmode || mode == V2DFmode)
35343 passes++;
35345 int i;
35346 rtx mhalf;
35347 enum insn_code code = optab_handler (smul_optab, mode);
35348 insn_gen_fn gen_mul = GEN_FCN (code);
35350 gcc_assert (code != CODE_FOR_nothing);
35352 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35354 /* e = rsqrt estimate */
35355 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35356 UNSPEC_RSQRT)));
35358 /* If (src == 0.0), filter the infinite estimate so that g = e * src below does not produce a NaN (inf * 0) for sqrt(0.0). */
35359 if (!recip)
35361 rtx zero = force_reg (mode, CONST0_RTX (mode));
35363 if (mode == SFmode)
35365 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35366 e, zero, mode, 0);
35367 if (target != e)
35368 emit_move_insn (e, target);
35370 else
35372 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35373 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35377 /* g = sqrt estimate. */
35378 emit_insn (gen_mul (g, e, src));
35379 /* h = 1/(2*sqrt) estimate. */
35380 emit_insn (gen_mul (h, e, mhalf));
35382 if (recip)
35384 if (passes == 1)
35386 rtx t = gen_reg_rtx (mode);
35387 rs6000_emit_nmsub (t, g, h, mhalf);
35388 /* Apply the correction directly to the rsqrt estimate. */
35389 rs6000_emit_madd (dst, e, t, e);
35391 else
35393 for (i = 0; i < passes; i++)
35395 rtx t1 = gen_reg_rtx (mode);
35396 rtx g1 = gen_reg_rtx (mode);
35397 rtx h1 = gen_reg_rtx (mode);
35399 rs6000_emit_nmsub (t1, g, h, mhalf);
35400 rs6000_emit_madd (g1, g, t1, g);
35401 rs6000_emit_madd (h1, h, t1, h);
35403 g = g1;
35404 h = h1;
35407 /* h approximates 1/(2*sqrt), so multiply by 2 to get 1/sqrt. */
35407 emit_insn (gen_add3_insn (dst, h, h));
35410 else
35412 rtx t = gen_reg_rtx (mode);
35413 rs6000_emit_nmsub (t, g, h, mhalf);
35414 rs6000_emit_madd (dst, g, t, g);
35417 return;
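/* Hypothetical scalar model, not part of the build, of the Goldschmidt
   iteration above.  Starting from e ~ 1/sqrt(s), it keeps g ~ sqrt(s)
   and h ~ 1/(2*sqrt(s)) and refines both together; each pass scales g
   and h by the same (1 + t), so g == 2*s*h stays invariant while g*h
   converges to 1/2, hence g converges to sqrt(s) and 2*h to 1/sqrt(s).
   swsqrt_model and the 0.70 seed are illustrative stand-ins for the
   hardware rsqrt-estimate instruction (UNSPEC_RSQRT above).  */
#if 0
#include <stdio.h>

static double
swsqrt_model (double s, double e, int passes, int recip)
{
  double g = e * s;             /* sqrt estimate */
  double h = e * 0.5;           /* 1/(2*sqrt) estimate */
  if (recip)
    {
      for (int i = 0; i < passes; i++)
        {
          double t = 0.5 - g * h;   /* fnmsub against 0.5 */
          g = g + t * g;            /* fmadd */
          h = h + t * h;            /* fmadd */
        }
      return h + h;
    }
  /* The sqrt path above applies a single correction to g, so the
     result quality depends on the initial estimate.  */
  double t = 0.5 - g * h;
  return g + t * g;
}

int
main (void)
{
  printf ("sqrt:  %.17g\n", swsqrt_model (2.0, 0.70, 3, 0));
  printf ("rsqrt: %.17g\n", swsqrt_model (2.0, 0.70, 3, 1));
  return 0;
}
#endif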
35420 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35421 (Power7) targets. DST is the target, and SRC is the argument operand. */
35423 void
35424 rs6000_emit_popcount (rtx dst, rtx src)
35426 machine_mode mode = GET_MODE (dst);
35427 rtx tmp1, tmp2;
35429 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35430 if (TARGET_POPCNTD)
35432 if (mode == SImode)
35433 emit_insn (gen_popcntdsi2 (dst, src));
35434 else
35435 emit_insn (gen_popcntddi2 (dst, src));
35436 return;
35439 tmp1 = gen_reg_rtx (mode);
35441 if (mode == SImode)
35443 emit_insn (gen_popcntbsi2 (tmp1, src));
35444 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35445 NULL_RTX, 0);
35446 tmp2 = force_reg (SImode, tmp2);
35447 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35449 else
35451 emit_insn (gen_popcntbdi2 (tmp1, src));
35452 tmp2 = expand_mult (DImode, tmp1,
35453 GEN_INT ((HOST_WIDE_INT)
35454 0x01010101 << 32 | 0x01010101),
35455 NULL_RTX, 0);
35456 tmp2 = force_reg (DImode, tmp2);
35457 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
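/* Illustrative sketch, not part of the build, of the popcntb fallback
   above.  popcntb leaves the population count of each byte in the
   corresponding result byte; multiplying by 0x01010101 sums the four
   byte counts into the most significant byte (each count is at most 8,
   so no carry can overflow a byte), and the final shift extracts it.
   popcntb_model is a hypothetical stand-in for the instruction.  */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t
popcntb_model (uint32_t x)
{
  uint32_t r = 0;
  for (int i = 0; i < 32; i += 8)
    r |= (uint32_t) __builtin_popcount ((x >> i) & 0xff) << i;
  return r;
}

int
main (void)
{
  uint32_t src = 0xF00F0001;             /* 4 + 4 + 0 + 1 = 9 bits set */
  uint32_t tmp1 = popcntb_model (src);   /* per-byte counts */
  uint32_t tmp2 = tmp1 * 0x01010101u;    /* total lands in top byte */
  printf ("%u\n", tmp2 >> 24);           /* prints 9 */
  return 0;
}
#endif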
35462 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35463 target, and SRC is the argument operand. */
35465 void
35466 rs6000_emit_parity (rtx dst, rtx src)
35468 machine_mode mode = GET_MODE (dst);
35469 rtx tmp;
35471 tmp = gen_reg_rtx (mode);
35473 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35474 if (TARGET_CMPB)
35476 if (mode == SImode)
35478 emit_insn (gen_popcntbsi2 (tmp, src));
35479 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35481 else
35483 emit_insn (gen_popcntbdi2 (tmp, src));
35484 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35486 return;
35489 if (mode == SImode)
35491 /* Is mult+shift >= shift+xor+shift+xor? */
35492 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35494 rtx tmp1, tmp2, tmp3, tmp4;
35496 tmp1 = gen_reg_rtx (SImode);
35497 emit_insn (gen_popcntbsi2 (tmp1, src));
35499 tmp2 = gen_reg_rtx (SImode);
35500 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35501 tmp3 = gen_reg_rtx (SImode);
35502 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35504 tmp4 = gen_reg_rtx (SImode);
35505 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35506 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35508 else
35509 rs6000_emit_popcount (tmp, src);
35510 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35512 else
35514 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35515 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35517 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35519 tmp1 = gen_reg_rtx (DImode);
35520 emit_insn (gen_popcntbdi2 (tmp1, src));
35522 tmp2 = gen_reg_rtx (DImode);
35523 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35524 tmp3 = gen_reg_rtx (DImode);
35525 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35527 tmp4 = gen_reg_rtx (DImode);
35528 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35529 tmp5 = gen_reg_rtx (DImode);
35530 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35532 tmp6 = gen_reg_rtx (DImode);
35533 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35534 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35536 else
35537 rs6000_emit_popcount (tmp, src);
35538 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
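/* Illustrative sketch, not part of the build, of the SImode shift+xor
   fallback above.  XOR-folding the per-byte counts from popcntb halves
   the live width each step; since parity(a + b) == parity(a) ^ parity(b),
   the low bit of the final byte is the parity of the whole word.
   parity_model is a hypothetical scalar stand-in.  */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t
parity_model (uint32_t src)
{
  uint32_t t = 0;
  for (int i = 0; i < 32; i += 8)   /* popcntb model */
    t |= (uint32_t) __builtin_popcount ((src >> i) & 0xff) << i;
  t ^= t >> 16;                     /* fold halfwords */
  t ^= t >> 8;                      /* fold bytes */
  return t & 1;                     /* upper bits are garbage */
}

int
main (void)
{
  printf ("%u %u\n", parity_model (0x7), parity_model (0xff));   /* 1 0 */
  return 0;
}
#endif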
35542 /* Expand an Altivec constant permutation for little endian mode.
35543 There are two issues: First, the two input operands must be
35544 swapped so that together they form a double-wide array in LE
35545 order. Second, the vperm instruction has surprising behavior
35546 in LE mode: it interprets the elements of the source vectors
35547 in BE mode ("left to right") and interprets the elements of
35548 the destination vector in LE mode ("right to left"). To
35549 correct for this, we must subtract each element of the permute
35550 control vector from 31.
35552 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35553 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35554 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35555 serve as the permute control vector. Then, in BE mode,
35557 vperm 9,10,11,12
35559 places the desired result in vr9. However, in LE mode the
35560 vector contents will be
35562 vr10 = 00000003 00000002 00000001 00000000
35563 vr11 = 00000007 00000006 00000005 00000004
35565 The result of the vperm using the same permute control vector is
35567 vr9 = 05000000 07000000 01000000 03000000
35569 That is, the leftmost 4 bytes of vr10 are interpreted as the
35570 source for the rightmost 4 bytes of vr9, and so on.
35572 If we change the permute control vector to
35574 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35576 and issue
35578 vperm 9,11,10,12
35580 we get the desired
35582 vr9 = 00000006 00000004 00000002 00000000. */
35584 void
35585 altivec_expand_vec_perm_const_le (rtx operands[4])
35587 unsigned int i;
35588 rtx perm[16];
35589 rtx constv, unspec;
35590 rtx target = operands[0];
35591 rtx op0 = operands[1];
35592 rtx op1 = operands[2];
35593 rtx sel = operands[3];
35595 /* Unpack and adjust the constant selector. */
35596 for (i = 0; i < 16; ++i)
35598 rtx e = XVECEXP (sel, 0, i);
35599 unsigned int elt = 31 - (INTVAL (e) & 31);
35600 perm[i] = GEN_INT (elt);
35603 /* Expand to a permute, swapping the inputs and using the
35604 adjusted selector. */
35605 if (!REG_P (op0))
35606 op0 = force_reg (V16QImode, op0);
35607 if (!REG_P (op1))
35608 op1 = force_reg (V16QImode, op1);
35610 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35611 constv = force_reg (V16QImode, constv);
35612 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35613 UNSPEC_VPERM);
35614 if (!REG_P (target))
35616 rtx tmp = gen_reg_rtx (V16QImode);
35617 emit_move_insn (tmp, unspec);
35618 unspec = tmp;
35621 emit_move_insn (target, unspec);
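/* Illustrative sketch, not part of the build: the selector adjustment
   performed above.  Each element of the BE permute control vector is
   replaced by 31 minus that element (and the source operands are
   swapped when the vperm is emitted).  Run on the extract-even control
   vector from the comment above, it reproduces the adjusted vr12.  */
#if 0
#include <stdio.h>

int
main (void)
{
  static const int be_sel[16] =
    { 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 };
  for (int i = 0; i < 16; i++)
    printf ("%d%c", 31 - (be_sel[i] & 31), i < 15 ? ',' : '\n');
  /* Prints 31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4.  */
  return 0;
}
#endif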
35624 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35625 permute control vector. But here it's not a constant, so we must
35626 generate a vector NAND or NOR to do the adjustment. */
35628 void
35629 altivec_expand_vec_perm_le (rtx operands[4])
35631 rtx notx, iorx, unspec;
35632 rtx target = operands[0];
35633 rtx op0 = operands[1];
35634 rtx op1 = operands[2];
35635 rtx sel = operands[3];
35636 rtx tmp = target;
35637 rtx norreg = gen_reg_rtx (V16QImode);
35638 machine_mode mode = GET_MODE (target);
35640 /* Get everything in regs so the pattern matches. */
35641 if (!REG_P (op0))
35642 op0 = force_reg (mode, op0);
35643 if (!REG_P (op1))
35644 op1 = force_reg (mode, op1);
35645 if (!REG_P (sel))
35646 sel = force_reg (V16QImode, sel);
35647 if (!REG_P (target))
35648 tmp = gen_reg_rtx (mode);
35650 if (TARGET_P9_VECTOR)
35652 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
35653 UNSPEC_VPERMR);
35655 else
35657 /* Invert the selector with a VNAND if available, else a VNOR.
35658 The VNAND is preferred for future fusion opportunities. */
35659 notx = gen_rtx_NOT (V16QImode, sel);
35660 iorx = (TARGET_P8_VECTOR
35661 ? gen_rtx_IOR (V16QImode, notx, notx)
35662 : gen_rtx_AND (V16QImode, notx, notx));
35663 emit_insn (gen_rtx_SET (norreg, iorx));
35665 /* Permute with operands reversed and adjusted selector. */
35666 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35667 UNSPEC_VPERM);
35670 /* Copy into target, possibly by way of a register. */
35671 if (!REG_P (target))
35673 emit_move_insn (tmp, unspec);
35674 unspec = tmp;
35677 emit_move_insn (target, unspec);
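/* Illustrative check, not part of the build, of why a plain bitwise NOT
   of the selector (the VNAND/VNOR of SEL with itself above) matches the
   constant-case adjustment: vperm reads only the low five bits of each
   selector byte, and on those bits ~e == 31 - e.  */
#if 0
#include <stdio.h>

int
main (void)
{
  for (unsigned e = 0; e < 32; e++)
    if ((~e & 31) != 31 - e)
      return 1;               /* never reached */
  puts ("~e & 31 == 31 - e for all e in [0, 31]");
  return 0;
}
#endif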
35680 /* Expand an Altivec constant permutation. Return true if we match
35681 an efficient implementation; false to fall back to VPERM. */
35683 bool
35684 altivec_expand_vec_perm_const (rtx operands[4])
35686 struct altivec_perm_insn {
35687 HOST_WIDE_INT mask;
35688 enum insn_code impl;
35689 unsigned char perm[16];
35691 static const struct altivec_perm_insn patterns[] = {
35692 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35693 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35694 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35695 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35696 { OPTION_MASK_ALTIVEC,
35697 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35698 : CODE_FOR_altivec_vmrglb_direct),
35699 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35700 { OPTION_MASK_ALTIVEC,
35701 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35702 : CODE_FOR_altivec_vmrglh_direct),
35703 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35704 { OPTION_MASK_ALTIVEC,
35705 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35706 : CODE_FOR_altivec_vmrglw_direct),
35707 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35708 { OPTION_MASK_ALTIVEC,
35709 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35710 : CODE_FOR_altivec_vmrghb_direct),
35711 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35712 { OPTION_MASK_ALTIVEC,
35713 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35714 : CODE_FOR_altivec_vmrghh_direct),
35715 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35716 { OPTION_MASK_ALTIVEC,
35717 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35718 : CODE_FOR_altivec_vmrghw_direct),
35719 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35720 { OPTION_MASK_P8_VECTOR,
35721 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35722 : CODE_FOR_p8_vmrgow_v4sf_direct),
35723 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35724 { OPTION_MASK_P8_VECTOR,
35725 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35726 : CODE_FOR_p8_vmrgew_v4sf_direct),
35727 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35730 unsigned int i, j, elt, which;
35731 unsigned char perm[16];
35732 rtx target, op0, op1, sel, x;
35733 bool one_vec;
35735 target = operands[0];
35736 op0 = operands[1];
35737 op1 = operands[2];
35738 sel = operands[3];
35740 /* Unpack the constant selector. */
35741 for (i = which = 0; i < 16; ++i)
35743 rtx e = XVECEXP (sel, 0, i);
35744 elt = INTVAL (e) & 31;
35745 which |= (elt < 16 ? 1 : 2);
35746 perm[i] = elt;
35749 /* Simplify the constant selector based on operands. */
35750 switch (which)
35752 default:
35753 gcc_unreachable ();
35755 case 3:
35756 one_vec = false;
35757 if (!rtx_equal_p (op0, op1))
35758 break;
35759 /* FALLTHRU */
35761 case 2:
35762 for (i = 0; i < 16; ++i)
35763 perm[i] &= 15;
35764 op0 = op1;
35765 one_vec = true;
35766 break;
35768 case 1:
35769 op1 = op0;
35770 one_vec = true;
35771 break;
35774 /* Look for splat patterns. */
35775 if (one_vec)
35777 elt = perm[0];
35779 for (i = 0; i < 16; ++i)
35780 if (perm[i] != elt)
35781 break;
35782 if (i == 16)
35784 if (!BYTES_BIG_ENDIAN)
35785 elt = 15 - elt;
35786 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35787 return true;
35790 if (elt % 2 == 0)
35792 for (i = 0; i < 16; i += 2)
35793 if (perm[i] != elt || perm[i + 1] != elt + 1)
35794 break;
35795 if (i == 16)
35797 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35798 x = gen_reg_rtx (V8HImode);
35799 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35800 GEN_INT (field)));
35801 emit_move_insn (target, gen_lowpart (V16QImode, x));
35802 return true;
35806 if (elt % 4 == 0)
35808 for (i = 0; i < 16; i += 4)
35809 if (perm[i] != elt
35810 || perm[i + 1] != elt + 1
35811 || perm[i + 2] != elt + 2
35812 || perm[i + 3] != elt + 3)
35813 break;
35814 if (i == 16)
35816 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35817 x = gen_reg_rtx (V4SImode);
35818 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35819 GEN_INT (field)));
35820 emit_move_insn (target, gen_lowpart (V16QImode, x));
35821 return true;
35826 /* Look for merge and pack patterns. */
35827 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35829 bool swapped;
35831 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35832 continue;
35834 elt = patterns[j].perm[0];
35835 if (perm[0] == elt)
35836 swapped = false;
35837 else if (perm[0] == elt + 16)
35838 swapped = true;
35839 else
35840 continue;
35841 for (i = 1; i < 16; ++i)
35843 elt = patterns[j].perm[i];
35844 if (swapped)
35845 elt = (elt >= 16 ? elt - 16 : elt + 16);
35846 else if (one_vec && elt >= 16)
35847 elt -= 16;
35848 if (perm[i] != elt)
35849 break;
35851 if (i == 16)
35853 enum insn_code icode = patterns[j].impl;
35854 machine_mode omode = insn_data[icode].operand[0].mode;
35855 machine_mode imode = insn_data[icode].operand[1].mode;
35857 /* For little-endian, don't use vpkuwum and vpkuhum if the
35858 underlying vector type is not V4SI and V8HI, respectively.
35859 For example, using vpkuwum with a V8HI picks up the even
35860 halfwords (BE numbering) when the even halfwords (LE
35861 numbering) are what we need. */
35862 if (!BYTES_BIG_ENDIAN
35863 && icode == CODE_FOR_altivec_vpkuwum_direct
35864 && ((GET_CODE (op0) == REG
35865 && GET_MODE (op0) != V4SImode)
35866 || (GET_CODE (op0) == SUBREG
35867 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35868 continue;
35869 if (!BYTES_BIG_ENDIAN
35870 && icode == CODE_FOR_altivec_vpkuhum_direct
35871 && ((GET_CODE (op0) == REG
35872 && GET_MODE (op0) != V8HImode)
35873 || (GET_CODE (op0) == SUBREG
35874 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35875 continue;
35877 /* For little-endian, the two input operands must be swapped
35878 (or swapped back) to ensure proper right-to-left numbering
35879 from 0 to 2N-1. */
35880 if (swapped ^ !BYTES_BIG_ENDIAN)
35881 std::swap (op0, op1);
35882 if (imode != V16QImode)
35884 op0 = gen_lowpart (imode, op0);
35885 op1 = gen_lowpart (imode, op1);
35887 if (omode == V16QImode)
35888 x = target;
35889 else
35890 x = gen_reg_rtx (omode);
35891 emit_insn (GEN_FCN (icode) (x, op0, op1));
35892 if (omode != V16QImode)
35893 emit_move_insn (target, gen_lowpart (V16QImode, x));
35894 return true;
35898 if (!BYTES_BIG_ENDIAN)
35900 altivec_expand_vec_perm_const_le (operands);
35901 return true;
35904 return false;
35907 /* Expand a Paired Single or VSX Permute Doubleword constant permutation.
35908 Return true if we match an efficient implementation. */
35910 static bool
35911 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35912 unsigned char perm0, unsigned char perm1)
35914 rtx x;
35916 /* If both selectors come from the same operand, fold to single op. */
35917 if ((perm0 & 2) == (perm1 & 2))
35919 if (perm0 & 2)
35920 op0 = op1;
35921 else
35922 op1 = op0;
35924 /* If both operands are equal, fold to simpler permutation. */
35925 if (rtx_equal_p (op0, op1))
35927 perm0 = perm0 & 1;
35928 perm1 = (perm1 & 1) + 2;
35930 /* If the first selector comes from the second operand, swap. */
35931 else if (perm0 & 2)
35933 if (perm1 & 2)
35934 return false;
35935 perm0 -= 2;
35936 perm1 += 2;
35937 std::swap (op0, op1);
35939 /* If the second selector does not come from the second operand, fail. */
35940 else if ((perm1 & 2) == 0)
35941 return false;
35943 /* Success! */
35944 if (target != NULL)
35946 machine_mode vmode, dmode;
35947 rtvec v;
35949 vmode = GET_MODE (target);
35950 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35951 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35952 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35953 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35954 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35955 emit_insn (gen_rtx_SET (target, x));
35957 return true;
35960 bool
35961 rs6000_expand_vec_perm_const (rtx operands[4])
35963 rtx target, op0, op1, sel;
35964 unsigned char perm0, perm1;
35966 target = operands[0];
35967 op0 = operands[1];
35968 op1 = operands[2];
35969 sel = operands[3];
35971 /* Unpack the constant selector. */
35972 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
35973 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
35975 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
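/* Hypothetical scalar model, not part of the build, of the two-element
   case above: PERM0 and PERM1 each index the four doublewords of op0
   and op1 concatenated, so bit 1 of a selector picks the operand and
   bit 0 picks the element within it.  perm2_model is an illustrative
   name.  */
#if 0
#include <stdio.h>

static void
perm2_model (const double op0[2], const double op1[2],
             unsigned perm0, unsigned perm1, double out[2])
{
  const double *cat[4] = { &op0[0], &op0[1], &op1[0], &op1[1] };
  out[0] = *cat[perm0 & 3];
  out[1] = *cat[perm1 & 3];
}

int
main (void)
{
  double a[2] = { 10.0, 11.0 }, b[2] = { 20.0, 21.0 }, r[2];
  perm2_model (a, b, 0, 2, r);
  printf ("%g %g\n", r[0], r[1]);   /* prints 10 20 */
  return 0;
}
#endif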
35978 /* Test whether a constant permutation is supported. */
35980 static bool
35981 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode, vec_perm_indices sel)
35983 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35984 if (TARGET_ALTIVEC)
35985 return true;
35987 /* Check for ps_merge* or evmerge* insns. */
35988 if (TARGET_PAIRED_FLOAT && vmode == V2SFmode)
35990 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35991 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35992 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
35995 return false;
35998 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
36000 static void
36001 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
36002 machine_mode vmode, unsigned nelt, rtx perm[])
36004 machine_mode imode;
36005 rtx x;
36007 imode = vmode;
36008 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
36009 imode = mode_for_int_vector (vmode).require ();
36011 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
36012 x = expand_vec_perm (vmode, op0, op1, x, target);
36013 if (x != target)
36014 emit_move_insn (target, x);
36017 /* Expand an extract even operation. */
36019 void
36020 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
36022 machine_mode vmode = GET_MODE (target);
36023 unsigned i, nelt = GET_MODE_NUNITS (vmode);
36024 rtx perm[16];
36026 for (i = 0; i < nelt; i++)
36027 perm[i] = GEN_INT (i * 2);
36029 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
36032 /* Expand a vector interleave operation. */
36034 void
36035 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
36037 machine_mode vmode = GET_MODE (target);
36038 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
36039 rtx perm[16];
36041 high = (highp ? 0 : nelt / 2);
36042 for (i = 0; i < nelt / 2; i++)
36044 perm[i * 2] = GEN_INT (i + high);
36045 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
36048 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
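/* Illustrative sketch, not part of the build: the selector built above
   for a 4-element vector.  Indices 0..nelt-1 refer to OP0 and
   nelt..2*nelt-1 to OP1, so the "high" interleave selects {0, 4, 1, 5}
   and the "low" interleave {2, 6, 3, 7}.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned nelt = 4;
  for (int highp = 1; highp >= 0; highp--)
    {
      unsigned high = highp ? 0 : nelt / 2;
      for (unsigned i = 0; i < nelt / 2; i++)
        printf ("%u %u ", i + high, i + nelt + high);
      printf ("\n");          /* "0 4 1 5" then "2 6 3 7" */
    }
  return 0;
}
#endif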
36051 /* Scale a V2DF vector SRC by two raised to the power SCALE, placing the result in TGT. */
36052 void
36053 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
36055 HOST_WIDE_INT hwi_scale (scale);
36056 REAL_VALUE_TYPE r_pow;
36057 rtvec v = rtvec_alloc (2);
36058 rtx elt;
36059 rtx scale_vec = gen_reg_rtx (V2DFmode);
36060 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
36061 elt = const_double_from_real_value (r_pow, DFmode);
36062 RTVEC_ELT (v, 0) = elt;
36063 RTVEC_ELT (v, 1) = elt;
36064 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
36065 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
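/* Illustrative scalar equivalent, not part of the build, of the V2DF
   scaling above: each element is multiplied by a splatted 2^SCALE,
   which ldexp computes exactly for doubles.  scale_v2df_model is an
   illustrative name.  */
#if 0
#include <math.h>
#include <stdio.h>

static void
scale_v2df_model (double tgt[2], const double src[2], int scale)
{
  double factor = ldexp (1.0, scale);   /* 2^scale */
  tgt[0] = src[0] * factor;
  tgt[1] = src[1] * factor;
}

int
main (void)
{
  double s[2] = { 1.5, -3.0 }, t[2];
  scale_v2df_model (t, s, 4);
  printf ("%g %g\n", t[0], t[1]);       /* prints 24 -48 */
  return 0;
}
#endif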
36068 /* Return an RTX representing where to find the function value of a
36069 function returning MODE. */
36070 static rtx
36071 rs6000_complex_function_value (machine_mode mode)
36073 unsigned int regno;
36074 rtx r1, r2;
36075 machine_mode inner = GET_MODE_INNER (mode);
36076 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
36078 if (TARGET_FLOAT128_TYPE
36079 && (mode == KCmode
36080 || (mode == TCmode && TARGET_IEEEQUAD)))
36081 regno = ALTIVEC_ARG_RETURN;
36083 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36084 regno = FP_ARG_RETURN;
36086 else
36088 regno = GP_ARG_RETURN;
36090 /* 32-bit is OK since it'll go in r3/r4. */
36091 if (TARGET_32BIT && inner_bytes >= 4)
36092 return gen_rtx_REG (mode, regno);
36095 if (inner_bytes >= 8)
36096 return gen_rtx_REG (mode, regno);
36098 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
36099 const0_rtx);
36100 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
36101 GEN_INT (inner_bytes));
36102 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
36105 /* Return an rtx describing a return value of MODE as a PARALLEL
36106 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
36107 stride REG_STRIDE. */
36109 static rtx
36110 rs6000_parallel_return (machine_mode mode,
36111 int n_elts, machine_mode elt_mode,
36112 unsigned int regno, unsigned int reg_stride)
36114 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
36116 int i;
36117 for (i = 0; i < n_elts; i++)
36119 rtx r = gen_rtx_REG (elt_mode, regno);
36120 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
36121 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
36122 regno += reg_stride;
36125 return par;
36128 /* Target hook for TARGET_FUNCTION_VALUE.
36130 An integer value is in r3 and a floating-point value is in fp1,
36131 unless -msoft-float. */
36133 static rtx
36134 rs6000_function_value (const_tree valtype,
36135 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
36136 bool outgoing ATTRIBUTE_UNUSED)
36138 machine_mode mode;
36139 unsigned int regno;
36140 machine_mode elt_mode;
36141 int n_elts;
36143 /* Special handling for structs in darwin64. */
36144 if (TARGET_MACHO
36145 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
36147 CUMULATIVE_ARGS valcum;
36148 rtx valret;
36150 valcum.words = 0;
36151 valcum.fregno = FP_ARG_MIN_REG;
36152 valcum.vregno = ALTIVEC_ARG_MIN_REG;
36153 /* Do a trial code generation as if this were going to be passed as
36154 an argument; if any part goes in memory, we return NULL. */
36155 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
36156 if (valret)
36157 return valret;
36158 /* Otherwise fall through to standard ABI rules. */
36161 mode = TYPE_MODE (valtype);
36163 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
36164 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
36166 int first_reg, n_regs;
36168 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
36170 /* _Decimal128 must use even/odd register pairs. */
36171 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36172 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
36174 else
36176 first_reg = ALTIVEC_ARG_RETURN;
36177 n_regs = 1;
36180 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
36183 /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI. */
36184 if (TARGET_32BIT && TARGET_POWERPC64)
36185 switch (mode)
36187 default:
36188 break;
36189 case E_DImode:
36190 case E_SCmode:
36191 case E_DCmode:
36192 case E_TCmode:
36193 int count = GET_MODE_SIZE (mode) / 4;
36194 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
36197 if ((INTEGRAL_TYPE_P (valtype)
36198 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
36199 || POINTER_TYPE_P (valtype))
36200 mode = TARGET_32BIT ? SImode : DImode;
36202 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36203 /* _Decimal128 must use an even/odd register pair. */
36204 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36205 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
36206 && !FLOAT128_VECTOR_P (mode)
36207 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
36208 regno = FP_ARG_RETURN;
36209 else if (TREE_CODE (valtype) == COMPLEX_TYPE
36210 && targetm.calls.split_complex_arg)
36211 return rs6000_complex_function_value (mode);
36212 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36213 return register is used in both cases, and we won't see V2DImode/V2DFmode
36214 for pure altivec, combine the two cases. */
36215 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
36216 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
36217 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
36218 regno = ALTIVEC_ARG_RETURN;
36219 else
36220 regno = GP_ARG_RETURN;
36222 return gen_rtx_REG (mode, regno);
36225 /* Define how to find the value returned by a library function
36226 assuming the value has mode MODE. */
36227 static rtx
36228 rs6000_libcall_value (machine_mode mode)
36230 unsigned int regno;
36232 /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI. */
36233 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
36234 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
36236 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36237 /* _Decimal128 must use an even/odd register pair. */
36238 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36239 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
36240 && TARGET_HARD_FLOAT
36241 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
36242 regno = FP_ARG_RETURN;
36243 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36244 return register is used in both cases, and we won't see V2DImode/V2DFmode
36245 for pure altivec, combine the two cases. */
36246 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
36247 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
36248 regno = ALTIVEC_ARG_RETURN;
36249 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
36250 return rs6000_complex_function_value (mode);
36251 else
36252 regno = GP_ARG_RETURN;
36254 return gen_rtx_REG (mode, regno);
36257 /* Compute register pressure classes. We implement the target hook to avoid
36258 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
36259 lead to incorrect estimates of the number of available registers and
36260 therefore increased register pressure/spill. */
36261 static int
36262 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
36264 int n;
36266 n = 0;
36267 pressure_classes[n++] = GENERAL_REGS;
36268 if (TARGET_VSX)
36269 pressure_classes[n++] = VSX_REGS;
36270 else
36272 if (TARGET_ALTIVEC)
36273 pressure_classes[n++] = ALTIVEC_REGS;
36274 if (TARGET_HARD_FLOAT)
36275 pressure_classes[n++] = FLOAT_REGS;
36277 pressure_classes[n++] = CR_REGS;
36278 pressure_classes[n++] = SPECIAL_REGS;
36280 return n;
36283 /* Given FROM and TO register numbers, say whether this elimination is allowed.
36284 Frame pointer elimination is automatically handled.
36286 For the RS/6000, if frame pointer elimination is being done, we would like
36287 to convert ap into fp, not sp.
36289 We need r30 if -mminimal-toc was specified and there are constant pool
36290 references. */
36292 static bool
36293 rs6000_can_eliminate (const int from, const int to)
36295 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
36296 ? ! frame_pointer_needed
36297 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
36298 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
36299 || constant_pool_empty_p ()
36300 : true);
36303 /* Define the offset between two registers, FROM to be eliminated and its
36304 replacement TO, at the start of a routine. */
36305 HOST_WIDE_INT
36306 rs6000_initial_elimination_offset (int from, int to)
36308 rs6000_stack_t *info = rs6000_stack_info ();
36309 HOST_WIDE_INT offset;
36311 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36312 offset = info->push_p ? 0 : -info->total_size;
36313 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36315 offset = info->push_p ? 0 : -info->total_size;
36316 if (FRAME_GROWS_DOWNWARD)
36317 offset += info->fixed_size + info->vars_size + info->parm_size;
36319 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36320 offset = FRAME_GROWS_DOWNWARD
36321 ? info->fixed_size + info->vars_size + info->parm_size
36322 : 0;
36323 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36324 offset = info->total_size;
36325 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36326 offset = info->push_p ? info->total_size : 0;
36327 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
36328 offset = 0;
36329 else
36330 gcc_unreachable ();
36332 return offset;
36335 /* Fill in sizes of registers used by unwinder. */
36337 static void
36338 rs6000_init_dwarf_reg_sizes_extra (tree address)
36340 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36342 int i;
36343 machine_mode mode = TYPE_MODE (char_type_node);
36344 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36345 rtx mem = gen_rtx_MEM (BLKmode, addr);
36346 rtx value = gen_int_mode (16, mode);
36348 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36349 The unwinder still needs to know the size of Altivec registers. */
36351 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36353 int column = DWARF_REG_TO_UNWIND_COLUMN
36354 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36355 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36357 emit_move_insn (adjust_address (mem, mode, offset), value);
36362 /* Map internal gcc register numbers to debug format register numbers.
36363 FORMAT specifies the type of debug register number to use:
36364 0 -- debug information, except for frame-related sections
36365 1 -- DWARF .debug_frame section
36366 2 -- DWARF .eh_frame section */
36368 unsigned int
36369 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36371 /* Except for the above, we use the internal number for non-DWARF
36372 debug information, and also for .eh_frame. */
36373 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
36374 return regno;
36376 /* On some platforms, we use the standard DWARF register
36377 numbering for .debug_info and .debug_frame. */
36378 #ifdef RS6000_USE_DWARF_NUMBERING
36379 if (regno <= 63)
36380 return regno;
36381 if (regno == LR_REGNO)
36382 return 108;
36383 if (regno == CTR_REGNO)
36384 return 109;
36385 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36386 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36387 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36388 to the DWARF reg for CR. */
36389 if (format == 1 && regno == CR2_REGNO)
36390 return 64;
36391 if (CR_REGNO_P (regno))
36392 return regno - CR0_REGNO + 86;
36393 if (regno == CA_REGNO)
36394 return 101; /* XER */
36395 if (ALTIVEC_REGNO_P (regno))
36396 return regno - FIRST_ALTIVEC_REGNO + 1124;
36397 if (regno == VRSAVE_REGNO)
36398 return 356;
36399 if (regno == VSCR_REGNO)
36400 return 67;
36401 #endif
36402 return regno;
36405 /* Target hook for eh_return_filter_mode. */
36406 static scalar_int_mode
36407 rs6000_eh_return_filter_mode (void)
36409 return TARGET_32BIT ? SImode : word_mode;
36412 /* Target hook for scalar_mode_supported_p. */
36413 static bool
36414 rs6000_scalar_mode_supported_p (scalar_mode mode)
36416 /* -m32 does not support TImode. This is the default, from
36417 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36418 same ABI as for -m32. But default_scalar_mode_supported_p allows
36419 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36420 for -mpowerpc64. */
36421 if (TARGET_32BIT && mode == TImode)
36422 return false;
36424 if (DECIMAL_FLOAT_MODE_P (mode))
36425 return default_decimal_float_supported_p ();
36426 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36427 return true;
36428 else
36429 return default_scalar_mode_supported_p (mode);
36432 /* Target hook for vector_mode_supported_p. */
36433 static bool
36434 rs6000_vector_mode_supported_p (machine_mode mode)
36437 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
36438 return true;
36440 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36441 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36442 double-double. */
36443 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36444 return true;
36446 else
36447 return false;
36450 /* Target hook for floatn_mode. */
36451 static opt_scalar_float_mode
36452 rs6000_floatn_mode (int n, bool extended)
36454 if (extended)
36456 switch (n)
36458 case 32:
36459 return DFmode;
36461 case 64:
36462 if (TARGET_FLOAT128_TYPE)
36463 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36464 else
36465 return opt_scalar_float_mode ();
36467 case 128:
36468 return opt_scalar_float_mode ();
36470 default:
36471 /* Those are the only valid _FloatNx types. */
36472 gcc_unreachable ();
36475 else
36477 switch (n)
36479 case 32:
36480 return SFmode;
36482 case 64:
36483 return DFmode;
36485 case 128:
36486 if (TARGET_FLOAT128_TYPE)
36487 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36488 else
36489 return opt_scalar_float_mode ();
36491 default:
36492 return opt_scalar_float_mode ();
36498 /* Target hook for c_mode_for_suffix. */
36499 static machine_mode
36500 rs6000_c_mode_for_suffix (char suffix)
36502 if (TARGET_FLOAT128_TYPE)
36504 if (suffix == 'q' || suffix == 'Q')
36505 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36507 /* At the moment, we are not defining a suffix for IBM extended double.
36508 If/when the default for -mabi=ieeelongdouble is changed, and we want
36509 to support __ibm128 constants in legacy library code, we may need to
36510 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36511 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36512 __float80 constants. */
36515 return VOIDmode;
36518 /* Target hook for invalid_arg_for_unprototyped_fn. */
36519 static const char *
36520 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36522 return (!rs6000_darwin64_abi
36523 && typelist == 0
36524 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36525 && (funcdecl == NULL_TREE
36526 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36527 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36528 ? N_("AltiVec argument passed to unprototyped function")
36529 : NULL;
36532 /* For TARGET_SECURE_PLT 32-bit PIC code we can save the PIC register
36533 setup by using the __stack_chk_fail_local hidden function instead of
36534 calling __stack_chk_fail directly. Otherwise it is better to call
36535 __stack_chk_fail directly. */
36537 static tree ATTRIBUTE_UNUSED
36538 rs6000_stack_protect_fail (void)
36540 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36541 ? default_hidden_stack_protect_fail ()
36542 : default_external_stack_protect_fail ();
36545 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36547 #if TARGET_ELF
36548 static unsigned HOST_WIDE_INT
36549 rs6000_asan_shadow_offset (void)
36551 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36553 #endif
36555 /* Mask options that we want to support inside of attribute((target)) and
36556 #pragma GCC target operations. Note, we do not include things like
36557 64/32-bit, endianness, hard/soft floating point, etc. that would have
36558 different calling sequences. */
36560 struct rs6000_opt_mask {
36561 const char *name; /* option name */
36562 HOST_WIDE_INT mask; /* mask to set */
36563 bool invert; /* invert sense of mask */
36564 bool valid_target; /* option is a target option */
36567 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36569 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36570 { "cmpb", OPTION_MASK_CMPB, false, true },
36571 { "crypto", OPTION_MASK_CRYPTO, false, true },
36572 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36573 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36574 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36575 false, true },
36576 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36577 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36578 { "fprnd", OPTION_MASK_FPRND, false, true },
36579 { "hard-dfp", OPTION_MASK_DFP, false, true },
36580 { "htm", OPTION_MASK_HTM, false, true },
36581 { "isel", OPTION_MASK_ISEL, false, true },
36582 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36583 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36584 { "modulo", OPTION_MASK_MODULO, false, true },
36585 { "mulhw", OPTION_MASK_MULHW, false, true },
36586 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36587 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36588 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36589 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36590 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36591 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36592 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
36593 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36594 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36595 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36596 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36597 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36598 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36599 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36600 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36601 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36602 { "string", OPTION_MASK_STRING, false, true },
36603 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
36604 { "update", OPTION_MASK_NO_UPDATE, true , true },
36605 { "vsx", OPTION_MASK_VSX, false, true },
36606 #ifdef OPTION_MASK_64BIT
36607 #if TARGET_AIX_OS
36608 { "aix64", OPTION_MASK_64BIT, false, false },
36609 { "aix32", OPTION_MASK_64BIT, true, false },
36610 #else
36611 { "64", OPTION_MASK_64BIT, false, false },
36612 { "32", OPTION_MASK_64BIT, true, false },
36613 #endif
36614 #endif
36615 #ifdef OPTION_MASK_EABI
36616 { "eabi", OPTION_MASK_EABI, false, false },
36617 #endif
36618 #ifdef OPTION_MASK_LITTLE_ENDIAN
36619 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36620 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36621 #endif
36622 #ifdef OPTION_MASK_RELOCATABLE
36623 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36624 #endif
36625 #ifdef OPTION_MASK_STRICT_ALIGN
36626 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36627 #endif
36628 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36629 { "string", OPTION_MASK_STRING, false, false },
36632 /* Builtin mask mapping for printing the flags. */
36633 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36635 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36636 { "vsx", RS6000_BTM_VSX, false, false },
36637 { "paired", RS6000_BTM_PAIRED, false, false },
36638 { "fre", RS6000_BTM_FRE, false, false },
36639 { "fres", RS6000_BTM_FRES, false, false },
36640 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36641 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36642 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36643 { "cell", RS6000_BTM_CELL, false, false },
36644 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36645 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36646 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36647 { "crypto", RS6000_BTM_CRYPTO, false, false },
36648 { "htm", RS6000_BTM_HTM, false, false },
36649 { "hard-dfp", RS6000_BTM_DFP, false, false },
36650 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36651 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36652 { "float128", RS6000_BTM_FLOAT128, false, false },
36653 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36656 /* Option variables that we want to support inside attribute((target)) and
36657 #pragma GCC target operations. */
36659 struct rs6000_opt_var {
36660 const char *name; /* option name */
36661 size_t global_offset; /* offset of the option in global_options. */
36662 size_t target_offset; /* offset of the option in target options. */
36665 static struct rs6000_opt_var const rs6000_opt_vars[] =
36667 { "friz",
36668 offsetof (struct gcc_options, x_TARGET_FRIZ),
36669 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36670 { "avoid-indexed-addresses",
36671 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36672 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36673 { "paired",
36674 offsetof (struct gcc_options, x_rs6000_paired_float),
36675 offsetof (struct cl_target_option, x_rs6000_paired_float), },
36676 { "longcall",
36677 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36678 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36679 { "optimize-swaps",
36680 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36681 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36682 { "allow-movmisalign",
36683 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36684 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36685 { "sched-groups",
36686 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36687 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36688 { "always-hint",
36689 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36690 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36691 { "align-branch-targets",
36692 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36693 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36694 { "tls-markers",
36695 offsetof (struct gcc_options, x_tls_markers),
36696 offsetof (struct cl_target_option, x_tls_markers), },
36697 { "sched-prolog",
36698 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36699 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36700 { "sched-epilog",
36701 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36702 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
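/* [Editorial sketch] These entries are plain boolean option variables
   rather than ISA mask bits; rs6000_inner_target_options stores !invert
   directly at the recorded global_options offset, so e.g.:

     #pragma GCC target ("no-longcall")

   clears x_rs6000_default_long_calls. */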
36705 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36706 parsing. Return true if there were no errors. */
36708 static bool
36709 rs6000_inner_target_options (tree args, bool attr_p)
36711 bool ret = true;
36713 if (args == NULL_TREE)
36716 else if (TREE_CODE (args) == STRING_CST)
36718 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36719 char *q;
36721 while ((q = strtok (p, ",")) != NULL)
36723 bool error_p = false;
36724 bool not_valid_p = false;
36725 const char *cpu_opt = NULL;
36727 p = NULL;
36728 if (strncmp (q, "cpu=", 4) == 0)
36730 int cpu_index = rs6000_cpu_name_lookup (q+4);
36731 if (cpu_index >= 0)
36732 rs6000_cpu_index = cpu_index;
36733 else
36735 error_p = true;
36736 cpu_opt = q+4;
36739 else if (strncmp (q, "tune=", 5) == 0)
36741 int tune_index = rs6000_cpu_name_lookup (q+5);
36742 if (tune_index >= 0)
36743 rs6000_tune_index = tune_index;
36744 else
36746 error_p = true;
36747 cpu_opt = q+5;
36750 else
36752 size_t i;
36753 bool invert = false;
36754 char *r = q;
36756 error_p = true;
36757 if (strncmp (r, "no-", 3) == 0)
36759 invert = true;
36760 r += 3;
36763 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36764 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36766 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36768 if (!rs6000_opt_masks[i].valid_target)
36769 not_valid_p = true;
36770 else
36772 error_p = false;
36773 rs6000_isa_flags_explicit |= mask;
36775 /* VSX needs altivec, so -mvsx automagically sets
36776 altivec and disables -mavoid-indexed-addresses. */
36777 if (!invert)
36779 if (mask == OPTION_MASK_VSX)
36781 mask |= OPTION_MASK_ALTIVEC;
36782 TARGET_AVOID_XFORM = 0;
36786 if (rs6000_opt_masks[i].invert)
36787 invert = !invert;
36789 if (invert)
36790 rs6000_isa_flags &= ~mask;
36791 else
36792 rs6000_isa_flags |= mask;
36794 break;
36797 if (error_p && !not_valid_p)
36799 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36800 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36802 size_t j = rs6000_opt_vars[i].global_offset;
36803 *((int *) ((char *)&global_options + j)) = !invert;
36804 error_p = false;
36805 not_valid_p = false;
36806 break;
36811 if (error_p)
36813 const char *eprefix, *esuffix;
36815 ret = false;
36816 if (attr_p)
36818 eprefix = "__attribute__((__target__(";
36819 esuffix = ")))";
36821 else
36823 eprefix = "#pragma GCC target ";
36824 esuffix = "";
36827 if (cpu_opt)
36828 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36829 q, esuffix);
36830 else if (not_valid_p)
36831 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36832 else
36833 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36838 else if (TREE_CODE (args) == TREE_LIST)
36842 tree value = TREE_VALUE (args);
36843 if (value)
36845 bool ret2 = rs6000_inner_target_options (value, attr_p);
36846 if (!ret2)
36847 ret = false;
36849 args = TREE_CHAIN (args);
36851 while (args != NULL_TREE);
36854 else
36856 error ("attribute %<target%> argument not a string");
36857 return false;
36860 return ret;
36863 /* Print out the target options as a list for -mdebug=target. */
36865 static void
36866 rs6000_debug_target_options (tree args, const char *prefix)
36868 if (args == NULL_TREE)
36869 fprintf (stderr, "%s<NULL>", prefix);
36871 else if (TREE_CODE (args) == STRING_CST)
36873 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36874 char *q;
36876 while ((q = strtok (p, ",")) != NULL)
36878 p = NULL;
36879 fprintf (stderr, "%s\"%s\"", prefix, q);
36880 prefix = ", ";
36884 else if (TREE_CODE (args) == TREE_LIST)
36888 tree value = TREE_VALUE (args);
36889 if (value)
36891 rs6000_debug_target_options (value, prefix);
36892 prefix = ", ";
36894 args = TREE_CHAIN (args);
36896 while (args != NULL_TREE);
36899 else
36900 gcc_unreachable ();
36902 return;
36906 /* Hook to validate attribute((target("..."))). */
36908 static bool
36909 rs6000_valid_attribute_p (tree fndecl,
36910 tree ARG_UNUSED (name),
36911 tree args,
36912 int flags)
36914 struct cl_target_option cur_target;
36915 bool ret;
36916 tree old_optimize;
36917 tree new_target, new_optimize;
36918 tree func_optimize;
36920 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36922 if (TARGET_DEBUG_TARGET)
36924 tree tname = DECL_NAME (fndecl);
36925 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36926 if (tname)
36927 fprintf (stderr, "function: %.*s\n",
36928 (int) IDENTIFIER_LENGTH (tname),
36929 IDENTIFIER_POINTER (tname));
36930 else
36931 fprintf (stderr, "function: unknown\n");
36933 fprintf (stderr, "args:");
36934 rs6000_debug_target_options (args, " ");
36935 fprintf (stderr, "\n");
36937 if (flags)
36938 fprintf (stderr, "flags: 0x%x\n", flags);
36940 fprintf (stderr, "--------------------\n");
36943 /* attribute((target("default"))) does nothing, beyond
36944 affecting multi-versioning. */
36945 if (TREE_VALUE (args)
36946 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36947 && TREE_CHAIN (args) == NULL_TREE
36948 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36949 return true;
36951 old_optimize = build_optimization_node (&global_options);
36952 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36954 /* If the function changed the optimization levels as well as setting target
36955 options, start with the optimizations specified. */
36956 if (func_optimize && func_optimize != old_optimize)
36957 cl_optimization_restore (&global_options,
36958 TREE_OPTIMIZATION (func_optimize));
36960 /* The target attributes may also change some optimization flags, so update
36961 the optimization options if necessary. */
36962 cl_target_option_save (&cur_target, &global_options);
36963 rs6000_cpu_index = rs6000_tune_index = -1;
36964 ret = rs6000_inner_target_options (args, true);
36966 /* Set up any additional state. */
36967 if (ret)
36969 ret = rs6000_option_override_internal (false);
36970 new_target = build_target_option_node (&global_options);
36972 else
36973 new_target = NULL;
36975 new_optimize = build_optimization_node (&global_options);
36977 if (!new_target)
36978 ret = false;
36980 else if (fndecl)
36982 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36984 if (old_optimize != new_optimize)
36985 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36988 cl_target_option_restore (&global_options, &cur_target);
36990 if (old_optimize != new_optimize)
36991 cl_optimization_restore (&global_options,
36992 TREE_OPTIMIZATION (old_optimize));
36994 return ret;
36998 /* Hook to validate the current #pragma GCC target and set the state, and
36999 update the macros based on what was changed. If ARGS is NULL, then
37000 POP_TARGET is used to reset the options. */
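/* [Editorial sketch] The ARGS == NULL case described above is what a
   pragma pop produces, e.g.:

     #pragma GCC push_options
     #pragma GCC target ("vsx")
     ... code compiled with VSX enabled ...
     #pragma GCC pop_options

   After either path, the XOR of the old and new ISA/builtin masks
   drives the macro updates (e.g. __VSX__) at the bottom of the
   function. */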
37002 bool
37003 rs6000_pragma_target_parse (tree args, tree pop_target)
37005 tree prev_tree = build_target_option_node (&global_options);
37006 tree cur_tree;
37007 struct cl_target_option *prev_opt, *cur_opt;
37008 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
37009 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
37011 if (TARGET_DEBUG_TARGET)
37013 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
37014 fprintf (stderr, "args:");
37015 rs6000_debug_target_options (args, " ");
37016 fprintf (stderr, "\n");
37018 if (pop_target)
37020 fprintf (stderr, "pop_target:\n");
37021 debug_tree (pop_target);
37023 else
37024 fprintf (stderr, "pop_target: <NULL>\n");
37026 fprintf (stderr, "--------------------\n");
37029 if (! args)
37031 cur_tree = ((pop_target)
37032 ? pop_target
37033 : target_option_default_node);
37034 cl_target_option_restore (&global_options,
37035 TREE_TARGET_OPTION (cur_tree));
37037 else
37039 rs6000_cpu_index = rs6000_tune_index = -1;
37040 if (!rs6000_inner_target_options (args, false)
37041 || !rs6000_option_override_internal (false)
37042 || (cur_tree = build_target_option_node (&global_options))
37043 == NULL_TREE)
37045 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
37046 fprintf (stderr, "invalid pragma\n");
37048 return false;
37052 target_option_current_node = cur_tree;
37053 rs6000_activate_target_options (target_option_current_node);
37055 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
37056 change the macros that are defined. */
37057 if (rs6000_target_modify_macros_ptr)
37059 prev_opt = TREE_TARGET_OPTION (prev_tree);
37060 prev_bumask = prev_opt->x_rs6000_builtin_mask;
37061 prev_flags = prev_opt->x_rs6000_isa_flags;
37063 cur_opt = TREE_TARGET_OPTION (cur_tree);
37064 cur_flags = cur_opt->x_rs6000_isa_flags;
37065 cur_bumask = cur_opt->x_rs6000_builtin_mask;
37067 diff_bumask = (prev_bumask ^ cur_bumask);
37068 diff_flags = (prev_flags ^ cur_flags);
37070 if ((diff_flags != 0) || (diff_bumask != 0))
37072 /* Delete old macros. */
37073 rs6000_target_modify_macros_ptr (false,
37074 prev_flags & diff_flags,
37075 prev_bumask & diff_bumask);
37077 /* Define new macros. */
37078 rs6000_target_modify_macros_ptr (true,
37079 cur_flags & diff_flags,
37080 cur_bumask & diff_bumask);
37084 return true;
37088 /* Remember the last target of rs6000_set_current_function. */
37089 static GTY(()) tree rs6000_previous_fndecl;
37091 /* Restore target's globals from NEW_TREE and invalidate the
37092 rs6000_previous_fndecl cache. */
37094 void
37095 rs6000_activate_target_options (tree new_tree)
37097 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
37098 if (TREE_TARGET_GLOBALS (new_tree))
37099 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
37100 else if (new_tree == target_option_default_node)
37101 restore_target_globals (&default_target_globals);
37102 else
37103 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
37104 rs6000_previous_fndecl = NULL_TREE;
37107 /* Establish appropriate back-end context for processing the function
37108 FNDECL. The argument might be NULL to indicate processing at top
37109 level, outside of any function scope. */
37110 static void
37111 rs6000_set_current_function (tree fndecl)
37113 if (TARGET_DEBUG_TARGET)
37115 fprintf (stderr, "\n==================== rs6000_set_current_function");
37117 if (fndecl)
37118 fprintf (stderr, ", fndecl %s (%p)",
37119 (DECL_NAME (fndecl)
37120 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
37121 : "<unknown>"), (void *)fndecl);
37123 if (rs6000_previous_fndecl)
37124 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
37126 fprintf (stderr, "\n");
37129 /* Only change the context if the function changes. This hook is called
37130 several times in the course of compiling a function, and we don't want to
37131 slow things down too much or call target_reinit when it isn't safe. */
37132 if (fndecl == rs6000_previous_fndecl)
37133 return;
37135 tree old_tree;
37136 if (rs6000_previous_fndecl == NULL_TREE)
37137 old_tree = target_option_current_node;
37138 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
37139 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
37140 else
37141 old_tree = target_option_default_node;
37143 tree new_tree;
37144 if (fndecl == NULL_TREE)
37146 if (old_tree != target_option_current_node)
37147 new_tree = target_option_current_node;
37148 else
37149 new_tree = NULL_TREE;
37151 else
37153 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37154 if (new_tree == NULL_TREE)
37155 new_tree = target_option_default_node;
37158 if (TARGET_DEBUG_TARGET)
37160 if (new_tree)
37162 fprintf (stderr, "\nnew fndecl target specific options:\n");
37163 debug_tree (new_tree);
37166 if (old_tree)
37168 fprintf (stderr, "\nold fndecl target specific options:\n");
37169 debug_tree (old_tree);
37172 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
37173 fprintf (stderr, "--------------------\n");
37176 if (new_tree && old_tree != new_tree)
37177 rs6000_activate_target_options (new_tree);
37179 if (fndecl)
37180 rs6000_previous_fndecl = fndecl;
37184 /* Save the current options */
37186 static void
37187 rs6000_function_specific_save (struct cl_target_option *ptr,
37188 struct gcc_options *opts)
37190 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
37191 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
37194 /* Restore the current options */
37196 static void
37197 rs6000_function_specific_restore (struct gcc_options *opts,
37198 struct cl_target_option *ptr)
37201 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
37202 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
37203 (void) rs6000_option_override_internal (false);
37206 /* Print the current options */
37208 static void
37209 rs6000_function_specific_print (FILE *file, int indent,
37210 struct cl_target_option *ptr)
37212 rs6000_print_isa_options (file, indent, "Isa options set",
37213 ptr->x_rs6000_isa_flags);
37215 rs6000_print_isa_options (file, indent, "Isa options explicit",
37216 ptr->x_rs6000_isa_flags_explicit);
37219 /* Helper function to print the current isa or misc options on a line. */
37221 static void
37222 rs6000_print_options_internal (FILE *file,
37223 int indent,
37224 const char *string,
37225 HOST_WIDE_INT flags,
37226 const char *prefix,
37227 const struct rs6000_opt_mask *opts,
37228 size_t num_elements)
37230 size_t i;
37231 size_t start_column = 0;
37232 size_t cur_column;
37233 size_t max_column = 120;
37234 size_t prefix_len = strlen (prefix);
37235 size_t comma_len = 0;
37236 const char *comma = "";
37238 if (indent)
37239 start_column += fprintf (file, "%*s", indent, "");
37241 if (!flags)
37243 fprintf (file, DEBUG_FMT_S, string, "<none>");
37244 return;
37247 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
37249 /* Print the various mask options. */
37250 cur_column = start_column;
37251 for (i = 0; i < num_elements; i++)
37253 bool invert = opts[i].invert;
37254 const char *name = opts[i].name;
37255 const char *no_str = "";
37256 HOST_WIDE_INT mask = opts[i].mask;
37257 size_t len = comma_len + prefix_len + strlen (name);
37259 if (!invert)
37261 if ((flags & mask) == 0)
37263 no_str = "no-";
37264 len += sizeof ("no-") - 1;
37267 flags &= ~mask;
37270 else
37272 if ((flags & mask) != 0)
37274 no_str = "no-";
37275 len += sizeof ("no-") - 1;
37278 flags |= mask;
37281 cur_column += len;
37282 if (cur_column > max_column)
37284 fprintf (stderr, ", \\\n%*s", (int)start_column, "");
37285 cur_column = start_column + len;
37286 comma = "";
37289 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
37290 comma = ", ";
37291 comma_len = sizeof (", ") - 1;
37294 fputs ("\n", file);
37297 /* Helper function to print the current isa options on a line. */
37299 static void
37300 rs6000_print_isa_options (FILE *file, int indent, const char *string,
37301 HOST_WIDE_INT flags)
37303 rs6000_print_options_internal (file, indent, string, flags, "-m",
37304 &rs6000_opt_masks[0],
37305 ARRAY_SIZE (rs6000_opt_masks));
37308 static void
37309 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37310 HOST_WIDE_INT flags)
37312 rs6000_print_options_internal (file, indent, string, flags, "",
37313 &rs6000_builtin_mask_names[0],
37314 ARRAY_SIZE (rs6000_builtin_mask_names));
37317 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
37318 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37319 -mupper-regs-df, etc.).
37321 If the user used -mno-power8-vector, we need to turn off all of the implicit
37322 ISA 2.07 and 3.0 options that relate to the vector unit.
37324 If the user used -mno-power9-vector, we need to turn off all of the implicit
37325 ISA 3.0 options that relate to the vector unit.
37327 This function does not handle explicit options such as the user specifying
37328 -mdirect-move. These are handled in rs6000_option_override_internal, and
37329 the appropriate error is given if needed.
37331 We return a mask of all of the implicit options that should not be enabled
37332 by default. */
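/* [Editorial sketch] Concretely (command lines, not code):

     -mno-vsx                    silently drops implicit -mpower8-vector etc.
     -mno-vsx -mpower8-vector    error: '-mno-vsx' turns off '-mpower8-vector'

   Only dependent flags the user set explicitly trigger the error below;
   the rest are quietly masked out of rs6000_isa_flags. */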
37334 static HOST_WIDE_INT
37335 rs6000_disable_incompatible_switches (void)
37337 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37338 size_t i, j;
37340 static const struct {
37341 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37342 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37343 const char *const name; /* name of the switch. */
37344 } flags[] = {
37345 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37346 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37347 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37350 for (i = 0; i < ARRAY_SIZE (flags); i++)
37352 HOST_WIDE_INT no_flag = flags[i].no_flag;
37354 if ((rs6000_isa_flags & no_flag) == 0
37355 && (rs6000_isa_flags_explicit & no_flag) != 0)
37357 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37358 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37359 & rs6000_isa_flags
37360 & dep_flags);
37362 if (set_flags)
37364 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37365 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37367 set_flags &= ~rs6000_opt_masks[j].mask;
37368 error ("%<-mno-%s%> turns off %<-m%s%>",
37369 flags[i].name,
37370 rs6000_opt_masks[j].name);
37373 gcc_assert (!set_flags);
37376 rs6000_isa_flags &= ~dep_flags;
37377 ignore_masks |= no_flag | dep_flags;
37381 return ignore_masks;
37385 /* Helper function for printing the function name when debugging. */
37387 static const char *
37388 get_decl_name (tree fn)
37390 tree name;
37392 if (!fn)
37393 return "<null>";
37395 name = DECL_NAME (fn);
37396 if (!name)
37397 return "<no-name>";
37399 return IDENTIFIER_POINTER (name);
37402 /* Return the clone id of the target we are compiling code for in a target
37403 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37404 the priority list for the target clones (ordered from lowest to
37405 highest). */
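/* [Editorial sketch] This supports the target_clones attribute, e.g.
   (the function name is hypothetical):

     __attribute__((target_clones ("cpu=power9,default")))
     long sum (long *p, long n);

   The cpu=power9 clone's ISA flags match a higher rs6000_clone_map
   entry than the default clone's, and so it gets a higher priority. */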
37407 static int
37408 rs6000_clone_priority (tree fndecl)
37410 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37411 HOST_WIDE_INT isa_masks;
37412 int ret = CLONE_DEFAULT;
37413 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37414 const char *attrs_str = NULL;
37416 attrs = TREE_VALUE (TREE_VALUE (attrs));
37417 attrs_str = TREE_STRING_POINTER (attrs);
37419 /* Return priority zero for default function. Return the ISA needed for the
37420 function if it is not the default. */
37421 if (strcmp (attrs_str, "default") != 0)
37423 if (fn_opts == NULL_TREE)
37424 fn_opts = target_option_default_node;
37426 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37427 isa_masks = rs6000_isa_flags;
37428 else
37429 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37431 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37432 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37433 break;
37436 if (TARGET_DEBUG_TARGET)
37437 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
37438 get_decl_name (fndecl), ret);
37440 return ret;
37443 /* This compares the priority of target features in function DECL1 and DECL2.
37444 It returns positive value if DECL1 is higher priority, negative value if
37445 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37446 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37448 static int
37449 rs6000_compare_version_priority (tree decl1, tree decl2)
37451 int priority1 = rs6000_clone_priority (decl1);
37452 int priority2 = rs6000_clone_priority (decl2);
37453 int ret = priority1 - priority2;
37455 if (TARGET_DEBUG_TARGET)
37456 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37457 get_decl_name (decl1), get_decl_name (decl2), ret);
37459 return ret;
37462 /* Make a dispatcher declaration for the multi-versioned function DECL.
37463 Calls to DECL function will be replaced with calls to the dispatcher
37464 by the front-end. Returns the decl of the dispatcher function. */
37466 static tree
37467 rs6000_get_function_versions_dispatcher (void *decl)
37469 tree fn = (tree) decl;
37470 struct cgraph_node *node = NULL;
37471 struct cgraph_node *default_node = NULL;
37472 struct cgraph_function_version_info *node_v = NULL;
37473 struct cgraph_function_version_info *first_v = NULL;
37475 tree dispatch_decl = NULL;
37477 struct cgraph_function_version_info *default_version_info = NULL;
37478 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37480 if (TARGET_DEBUG_TARGET)
37481 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37482 get_decl_name (fn));
37484 node = cgraph_node::get (fn);
37485 gcc_assert (node != NULL);
37487 node_v = node->function_version ();
37488 gcc_assert (node_v != NULL);
37490 if (node_v->dispatcher_resolver != NULL)
37491 return node_v->dispatcher_resolver;
37493 /* Find the default version and make it the first node. */
37494 first_v = node_v;
37495 /* Go to the beginning of the chain. */
37496 while (first_v->prev != NULL)
37497 first_v = first_v->prev;
37499 default_version_info = first_v;
37500 while (default_version_info != NULL)
37502 const tree decl2 = default_version_info->this_node->decl;
37503 if (is_function_default_version (decl2))
37504 break;
37505 default_version_info = default_version_info->next;
37508 /* If there is no default node, just return NULL. */
37509 if (default_version_info == NULL)
37510 return NULL;
37512 /* Make default info the first node. */
37513 if (first_v != default_version_info)
37515 default_version_info->prev->next = default_version_info->next;
37516 if (default_version_info->next)
37517 default_version_info->next->prev = default_version_info->prev;
37518 first_v->prev = default_version_info;
37519 default_version_info->next = first_v;
37520 default_version_info->prev = NULL;
37523 default_node = default_version_info->this_node;
37525 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37526 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37527 "target_clones attribute needs GLIBC (2.23 and newer) that "
37528 "exports hardware capability bits");
37529 #else
37531 if (targetm.has_ifunc_p ())
37533 struct cgraph_function_version_info *it_v = NULL;
37534 struct cgraph_node *dispatcher_node = NULL;
37535 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37537 /* Right now, the dispatching is done via ifunc. */
37538 dispatch_decl = make_dispatcher_decl (default_node->decl);
37540 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37541 gcc_assert (dispatcher_node != NULL);
37542 dispatcher_node->dispatcher_function = 1;
37543 dispatcher_version_info
37544 = dispatcher_node->insert_new_function_version ();
37545 dispatcher_version_info->next = default_version_info;
37546 dispatcher_node->definition = 1;
37548 /* Set the dispatcher for all the versions. */
37549 it_v = default_version_info;
37550 while (it_v != NULL)
37552 it_v->dispatcher_resolver = dispatch_decl;
37553 it_v = it_v->next;
37556 else
37558 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37559 "multiversioning needs ifunc which is not supported "
37560 "on this target");
37562 #endif
37564 return dispatch_decl;
37567 /* Make the resolver function decl to dispatch the versions of a multi-
37568 versioned function, DEFAULT_DECL. Create an empty basic block in the
37569 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37570 function. */
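/* [Editorial sketch] In rough C-level terms the artifacts built here are
   (illustrative; the real objects are decls and cgraph nodes):

     static void *foo.resolver (void);
     void *foo (void) __attribute__((ifunc ("foo.resolver")));

   i.e. the dispatcher decl becomes an ifunc whose resolver returns the
   address of the clone selected at load time. */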
37572 static tree
37573 make_resolver_func (const tree default_decl,
37574 const tree dispatch_decl,
37575 basic_block *empty_bb)
37577 /* Make the resolver function static. The resolver function returns
37578 void *. */
37579 tree decl_name = clone_function_name (default_decl, "resolver");
37580 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37581 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37582 tree decl = build_fn_decl (resolver_name, type);
37583 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37585 DECL_NAME (decl) = decl_name;
37586 TREE_USED (decl) = 1;
37587 DECL_ARTIFICIAL (decl) = 1;
37588 DECL_IGNORED_P (decl) = 0;
37589 TREE_PUBLIC (decl) = 0;
37590 DECL_UNINLINABLE (decl) = 1;
37592 /* Resolver is not external, body is generated. */
37593 DECL_EXTERNAL (decl) = 0;
37594 DECL_EXTERNAL (dispatch_decl) = 0;
37596 DECL_CONTEXT (decl) = NULL_TREE;
37597 DECL_INITIAL (decl) = make_node (BLOCK);
37598 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37600 /* Build result decl and add to function_decl. */
37601 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37602 DECL_ARTIFICIAL (t) = 1;
37603 DECL_IGNORED_P (t) = 1;
37604 DECL_RESULT (decl) = t;
37606 gimplify_function_tree (decl);
37607 push_cfun (DECL_STRUCT_FUNCTION (decl));
37608 *empty_bb = init_lowered_empty_function (decl, false,
37609 profile_count::uninitialized ());
37611 cgraph_node::add_new_function (decl, true);
37612 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37614 pop_cfun ();
37616 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37617 DECL_ATTRIBUTES (dispatch_decl)
37618 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37620 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37622 return decl;
37625 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37626 return a pointer to VERSION_DECL if we are running on a machine that
37627 supports the index CLONE_ISA hardware architecture bits. This function will
37628 be called during version dispatch to decide which function version to
37629 execute. It returns the basic block at the end, to which more conditions
37630 can be added. */
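/* [Editorial sketch] The block built below corresponds to (pseudo-C;
   "arch_2_07" stands in for rs6000_clone_map[clone_isa].name):

     if (__builtin_cpu_supports ("arch_2_07"))
       return (void *) &power8_clone;
     ... fall through to the next, lower-priority test ...

   For CLONE_DEFAULT only the unconditional return is emitted. */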
37632 static basic_block
37633 add_condition_to_bb (tree function_decl, tree version_decl,
37634 int clone_isa, basic_block new_bb)
37636 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37638 gcc_assert (new_bb != NULL);
37639 gimple_seq gseq = bb_seq (new_bb);
37642 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37643 build_fold_addr_expr (version_decl));
37644 tree result_var = create_tmp_var (ptr_type_node);
37645 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37646 gimple *return_stmt = gimple_build_return (result_var);
37648 if (clone_isa == CLONE_DEFAULT)
37650 gimple_seq_add_stmt (&gseq, convert_stmt);
37651 gimple_seq_add_stmt (&gseq, return_stmt);
37652 set_bb_seq (new_bb, gseq);
37653 gimple_set_bb (convert_stmt, new_bb);
37654 gimple_set_bb (return_stmt, new_bb);
37655 pop_cfun ();
37656 return new_bb;
37659 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37660 tree cond_var = create_tmp_var (bool_int_type_node);
37661 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37662 const char *arg_str = rs6000_clone_map[clone_isa].name;
37663 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37664 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37665 gimple_call_set_lhs (call_cond_stmt, cond_var);
37667 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37668 gimple_set_bb (call_cond_stmt, new_bb);
37669 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37671 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37672 NULL_TREE, NULL_TREE);
37673 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37674 gimple_set_bb (if_else_stmt, new_bb);
37675 gimple_seq_add_stmt (&gseq, if_else_stmt);
37677 gimple_seq_add_stmt (&gseq, convert_stmt);
37678 gimple_seq_add_stmt (&gseq, return_stmt);
37679 set_bb_seq (new_bb, gseq);
37681 basic_block bb1 = new_bb;
37682 edge e12 = split_block (bb1, if_else_stmt);
37683 basic_block bb2 = e12->dest;
37684 e12->flags &= ~EDGE_FALLTHRU;
37685 e12->flags |= EDGE_TRUE_VALUE;
37687 edge e23 = split_block (bb2, return_stmt);
37688 gimple_set_bb (convert_stmt, bb2);
37689 gimple_set_bb (return_stmt, bb2);
37691 basic_block bb3 = e23->dest;
37692 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37694 remove_edge (e23);
37695 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37697 pop_cfun ();
37698 return bb3;
37701 /* This function generates the dispatch function for multi-versioned functions.
37702 DISPATCH_DECL is the function which will contain the dispatch logic.
37703 FNDECLS are the function choices for dispatch, passed as a vector.
37704 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37705 code is generated. */
37707 static int
37708 dispatch_function_versions (tree dispatch_decl,
37709 void *fndecls_p,
37710 basic_block *empty_bb)
37712 int ix;
37713 tree ele;
37714 vec<tree> *fndecls;
37715 tree clones[CLONE_MAX];
37717 if (TARGET_DEBUG_TARGET)
37718 fputs ("dispatch_function_versions, top\n", stderr);
37720 gcc_assert (dispatch_decl != NULL
37721 && fndecls_p != NULL
37722 && empty_bb != NULL);
37724 /* fndecls_p is actually a vector. */
37725 fndecls = static_cast<vec<tree> *> (fndecls_p);
37727 /* At least one more version other than the default. */
37728 gcc_assert (fndecls->length () >= 2);
37730 /* The first version in the vector is the default decl. */
37731 memset ((void *) clones, '\0', sizeof (clones));
37732 clones[CLONE_DEFAULT] = (*fndecls)[0];
37734 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37735 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37736 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37737 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37738 to insert the code here to do the call. */
37740 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37742 int priority = rs6000_clone_priority (ele);
37743 if (!clones[priority])
37744 clones[priority] = ele;
37747 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37748 if (clones[ix])
37750 if (TARGET_DEBUG_TARGET)
37751 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37752 ix, get_decl_name (clones[ix]));
37754 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37755 *empty_bb);
37758 return 0;
37761 /* Generate the dispatching code body to dispatch multi-versioned function
37762 DECL. The target hook is called to process the "target" attributes and
37763 provide the code to dispatch the right function at run-time. NODE points
37764 to the dispatcher decl whose body will be created. */
37766 static tree
37767 rs6000_generate_version_dispatcher_body (void *node_p)
37769 tree resolver;
37770 basic_block empty_bb;
37771 struct cgraph_node *node = (cgraph_node *) node_p;
37772 struct cgraph_function_version_info *ninfo = node->function_version ();
37774 if (ninfo->dispatcher_resolver)
37775 return ninfo->dispatcher_resolver;
37777 /* node is going to be an alias, so remove the finalized bit. */
37778 node->definition = false;
37780 /* The first version in the chain corresponds to the default version. */
37781 ninfo->dispatcher_resolver = resolver
37782 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37784 if (TARGET_DEBUG_TARGET)
37785 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37786 get_decl_name (resolver));
37788 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37789 auto_vec<tree, 2> fn_ver_vec;
37791 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37792 vinfo;
37793 vinfo = vinfo->next)
37795 struct cgraph_node *version = vinfo->this_node;
37796 /* Check for virtual functions here again, as by this time it should
37797 have been determined if this function needs a vtable index or
37798 not. This happens for methods in derived classes that override
37799 virtual methods in base classes but are not explicitly marked as
37800 virtual. */
37801 if (DECL_VINDEX (version->decl))
37802 sorry ("Virtual function multiversioning not supported");
37804 fn_ver_vec.safe_push (version->decl);
37807 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37808 cgraph_edge::rebuild_edges ();
37809 pop_cfun ();
37810 return resolver;
37814 /* Hook to determine if one function can safely inline another. */
37816 static bool
37817 rs6000_can_inline_p (tree caller, tree callee)
37819 bool ret = false;
37820 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37821 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37823 /* If callee has no option attributes, then it is ok to inline. */
37824 if (!callee_tree)
37825 ret = true;
37827 /* If caller has no option attributes, but callee does then it is not ok to
37828 inline. */
37829 else if (!caller_tree)
37830 ret = false;
37832 else
37834 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37835 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37837 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37838 can inline an altivec function but a non-vsx function can't inline a
37839 vsx function. */
37840 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37841 == callee_opts->x_rs6000_isa_flags)
37842 ret = true;
37845 if (TARGET_DEBUG_TARGET)
37846 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37847 get_decl_name (caller), get_decl_name (callee),
37848 (ret ? "can" : "cannot"));
37850 return ret;
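/* [Editorial sketch] The subset rule above in example form (function
   names hypothetical):

     __attribute__((target ("vsx")))     int caller (void);  // vsx+altivec
     __attribute__((target ("altivec"))) int callee (void);  // altivec only

   caller may inline callee, since callee's ISA bits are a subset of
   caller's, but a function with no target attribute may not. */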
37853 /* Allocate a stack temp and fixup the address so it meets the particular
37854 memory requirements (either offsettable or REG+REG addressing). */
37856 rtx
37857 rs6000_allocate_stack_temp (machine_mode mode,
37858 bool offsettable_p,
37859 bool reg_reg_p)
37861 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37862 rtx addr = XEXP (stack, 0);
37863 int strict_p = reload_completed;
37865 if (!legitimate_indirect_address_p (addr, strict_p))
37867 if (offsettable_p
37868 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37869 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37871 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37872 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37875 return stack;
37878 /* Given a memory reference, if it does not use reg or reg+reg addressing,
37879 convert it to such a form to deal with memory reference instructions like
37880 STFIWX that only take reg+reg addressing. */
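/* [Editorial sketch] E.g. the PRE_INC case below rewrites (pseudo-RTL):

     (mem:SF (pre_inc (reg r9)))
       -> emit r9 = r9 + 4 via gen_add3_insn, then use (mem:SF (reg r9))

   PRE_DEC subtracts the mode size instead, and PRE_MODIFY emits its
   attached PLUS expression. */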
37882 rtx
37883 rs6000_address_for_fpconvert (rtx x)
37885 rtx addr;
37887 gcc_assert (MEM_P (x));
37888 addr = XEXP (x, 0);
37889 if (can_create_pseudo_p ()
37890 && ! legitimate_indirect_address_p (addr, reload_completed)
37891 && ! legitimate_indexed_address_p (addr, reload_completed))
37893 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37895 rtx reg = XEXP (addr, 0);
37896 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37897 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37898 gcc_assert (REG_P (reg));
37899 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37900 addr = reg;
37902 else if (GET_CODE (addr) == PRE_MODIFY)
37904 rtx reg = XEXP (addr, 0);
37905 rtx expr = XEXP (addr, 1);
37906 gcc_assert (REG_P (reg));
37907 gcc_assert (GET_CODE (expr) == PLUS);
37908 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37909 addr = reg;
37912 x = replace_equiv_address (x, copy_addr_to_reg (addr));
37915 return x;
37918 /* Given a memory reference, if it is not in the form for altivec memory
37919 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
37920 convert to the altivec format. */
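/* [Editorial sketch] VMX loads and stores ignore the low four bits of
   the effective address; the AND below makes that explicit (pseudo-RTL):

     (mem:V4SI (reg r9)) -> (mem:V4SI (and (reg r9) (const_int -16)))

   so the result satisfies altivec_indexed_or_indirect_operand. */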
37922 rtx
37923 rs6000_address_for_altivec (rtx x)
37925 gcc_assert (MEM_P (x));
37926 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
37928 rtx addr = XEXP (x, 0);
37930 if (!legitimate_indexed_address_p (addr, reload_completed)
37931 && !legitimate_indirect_address_p (addr, reload_completed))
37932 addr = copy_to_mode_reg (Pmode, addr);
37934 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
37935 x = change_address (x, GET_MODE (x), addr);
37938 return x;
37941 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37943 On the RS/6000, all integer constants are acceptable, most won't be valid
37944 for particular insns, though. Only easy FP constants are acceptable. */
37946 static bool
37947 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37949 if (TARGET_ELF && tls_referenced_p (x))
37950 return false;
37952 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
37953 || GET_MODE (x) == VOIDmode
37954 || (TARGET_POWERPC64 && mode == DImode)
37955 || easy_fp_constant (x, mode)
37956 || easy_vector_constant (x, mode));
37960 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37962 static bool
37963 chain_already_loaded (rtx_insn *last)
37965 for (; last != NULL; last = PREV_INSN (last))
37967 if (NONJUMP_INSN_P (last))
37969 rtx patt = PATTERN (last);
37971 if (GET_CODE (patt) == SET)
37973 rtx lhs = XEXP (patt, 0);
37975 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37976 return true;
37980 return false;
37983 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37985 void
37986 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37988 const bool direct_call_p
37989 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
37990 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37991 rtx toc_load = NULL_RTX;
37992 rtx toc_restore = NULL_RTX;
37993 rtx func_addr;
37994 rtx abi_reg = NULL_RTX;
37995 rtx call[4];
37996 int n_call;
37997 rtx insn;
37999 /* Handle longcall attributes. */
38000 if (INTVAL (cookie) & CALL_LONG)
38001 func_desc = rs6000_longcall_ref (func_desc);
38003 /* Handle indirect calls. */
38004 if (GET_CODE (func_desc) != SYMBOL_REF
38005 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
38007 /* Save the TOC into its reserved slot before the call,
38008 and prepare to restore it after the call. */
38009 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
38010 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
38011 rtx stack_toc_mem = gen_frame_mem (Pmode,
38012 gen_rtx_PLUS (Pmode, stack_ptr,
38013 stack_toc_offset));
38014 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
38015 gen_rtvec (1, stack_toc_offset),
38016 UNSPEC_TOCSLOT);
38017 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
38019 /* Can we optimize saving the TOC in the prologue or
38020 do we need to do it at every call? */
38021 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
38022 cfun->machine->save_toc_in_prologue = true;
38023 else
38025 MEM_VOLATILE_P (stack_toc_mem) = 1;
38026 emit_move_insn (stack_toc_mem, toc_reg);
38029 if (DEFAULT_ABI == ABI_ELFv2)
38031 /* A function pointer in the ELFv2 ABI is just a plain address, but
38032 the ABI requires it to be loaded into r12 before the call. */
38033 func_addr = gen_rtx_REG (Pmode, 12);
38034 emit_move_insn (func_addr, func_desc);
38035 abi_reg = func_addr;
38037 else
38039 /* A function pointer under AIX is a pointer to a data area whose
38040 first word contains the actual address of the function, whose
38041 second word contains a pointer to its TOC, and whose third word
38042 contains a value to place in the static chain register (r11).
38043 Note that if we load the static chain, our "trampoline" need
38044 not have any executable code. */
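/* [Editorial sketch] An AIX function descriptor viewed as a C struct
   (this type is illustrative and does not exist in the sources):

     struct desc { void *entry; void *toc; void *static_chain; };

   The loads below use offsets 0, GET_MODE_SIZE (Pmode) and
   2 * GET_MODE_SIZE (Pmode) for those three words. */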
38046 /* Load up address of the actual function. */
38047 func_desc = force_reg (Pmode, func_desc);
38048 func_addr = gen_reg_rtx (Pmode);
38049 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
38051 /* Prepare to load the TOC of the called function. Note that the
38052 TOC load must happen immediately before the actual call so
38053 that unwinding the TOC registers works correctly. See the
38054 comment in frob_update_context. */
38055 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
38056 rtx func_toc_mem = gen_rtx_MEM (Pmode,
38057 gen_rtx_PLUS (Pmode, func_desc,
38058 func_toc_offset));
38059 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
38061 /* If we have a static chain, load it up. But, if the call was
38062 originally direct, the 3rd word has not been written since no
38063 trampoline has been built, so we ought not to load it, lest we
38064 override a static chain value. */
38065 if (!direct_call_p
38066 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
38067 && !chain_already_loaded (get_current_sequence ()->next->last))
38069 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
38070 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
38071 rtx func_sc_mem = gen_rtx_MEM (Pmode,
38072 gen_rtx_PLUS (Pmode, func_desc,
38073 func_sc_offset));
38074 emit_move_insn (sc_reg, func_sc_mem);
38075 abi_reg = sc_reg;
38079 else
38081 /* Direct calls use the TOC: for local calls, the callee will
38082 assume the TOC register is set; for non-local calls, the
38083 PLT stub needs the TOC register. */
38084 abi_reg = toc_reg;
38085 func_addr = func_desc;
38088 /* Create the call. */
38089 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
38090 if (value != NULL_RTX)
38091 call[0] = gen_rtx_SET (value, call[0]);
38092 n_call = 1;
38094 if (toc_load)
38095 call[n_call++] = toc_load;
38096 if (toc_restore)
38097 call[n_call++] = toc_restore;
38099 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
38101 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
38102 insn = emit_call_insn (insn);
38104 /* Mention all registers defined by the ABI to hold information
38105 as uses in CALL_INSN_FUNCTION_USAGE. */
38106 if (abi_reg)
38107 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38110 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
38112 void
38113 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
38115 rtx call[2];
38116 rtx insn;
38118 gcc_assert (INTVAL (cookie) == 0);
38120 /* Create the call. */
38121 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
38122 if (value != NULL_RTX)
38123 call[0] = gen_rtx_SET (value, call[0]);
38125 call[1] = simple_return_rtx;
38127 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
38128 insn = emit_call_insn (insn);
38130 /* Note use of the TOC register. */
38131 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
38134 /* Return whether we need to always update the saved TOC pointer when we update
38135 the stack pointer. */
38137 static bool
38138 rs6000_save_toc_in_prologue_p (void)
38140 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38143 #ifdef HAVE_GAS_HIDDEN
38144 # define USE_HIDDEN_LINKONCE 1
38145 #else
38146 # define USE_HIDDEN_LINKONCE 0
38147 #endif
38149 /* Fills in the label name that should be used for a 476 link stack thunk. */
38151 void
38152 get_ppc476_thunk_name (char name[32])
38154 gcc_assert (TARGET_LINK_STACK);
38156 if (USE_HIDDEN_LINKONCE)
38157 sprintf (name, "__ppc476.get_thunk");
38158 else
38159 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38162 /* This function emits the simple thunk routine that is used to preserve
38163 the link stack on the 476 cpu. */
38165 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38166 static void
38167 rs6000_code_end (void)
38169 char name[32];
38170 tree decl;
38172 if (!TARGET_LINK_STACK)
38173 return;
38175 get_ppc476_thunk_name (name);
38177 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38178 build_function_type_list (void_type_node, NULL_TREE));
38179 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38180 NULL_TREE, void_type_node);
38181 TREE_PUBLIC (decl) = 1;
38182 TREE_STATIC (decl) = 1;
38184 #if RS6000_WEAK
38185 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38187 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38188 targetm.asm_out.unique_section (decl, 0);
38189 switch_to_section (get_named_section (decl, NULL, 0));
38190 DECL_WEAK (decl) = 1;
38191 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38192 targetm.asm_out.globalize_label (asm_out_file, name);
38193 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38194 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38196 else
38197 #endif
38199 switch_to_section (text_section);
38200 ASM_OUTPUT_LABEL (asm_out_file, name);
38203 DECL_INITIAL (decl) = make_node (BLOCK);
38204 current_function_decl = decl;
38205 allocate_struct_function (decl, false);
38206 init_function_start (decl);
38207 first_function_block_is_cold = false;
38208 /* Make sure unwind info is emitted for the thunk if needed. */
38209 final_start_function (emit_barrier (), asm_out_file, 1);
38211 fputs ("\tblr\n", asm_out_file);
38213 final_end_function ();
38214 init_insn_lengths ();
38215 free_after_compilation (cfun);
38216 set_cfun (NULL);
38217 current_function_decl = NULL;
38220 /* Add r30 to hard reg set if the prologue sets it up and it is not
38221 pic_offset_table_rtx. */
38223 static void
38224 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38226 if (!TARGET_SINGLE_PIC_BASE
38227 && TARGET_TOC
38228 && TARGET_MINIMAL_TOC
38229 && !constant_pool_empty_p ())
38230 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38231 if (cfun->machine->split_stack_argp_used)
38232 add_to_hard_reg_set (&set->set, Pmode, 12);
38234 /* Make sure the hard reg set doesn't include r2, which was possibly added
38235 via PIC_OFFSET_TABLE_REGNUM. */
38236 if (TARGET_TOC)
38237 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38241 /* Helper function for rs6000_split_logical to emit a logical instruction after
38242 splitting the operation to single GPR registers.
38244 DEST is the destination register.
38245 OP1 and OP2 are the input source registers.
38246 CODE is the base operation (AND, IOR, XOR, NOT).
38247 MODE is the machine mode.
38248 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38249 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38250 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38252 static void
38253 rs6000_split_logical_inner (rtx dest,
38254 rtx op1,
38255 rtx op2,
38256 enum rtx_code code,
38257 machine_mode mode,
38258 bool complement_final_p,
38259 bool complement_op1_p,
38260 bool complement_op2_p)
38262 rtx bool_rtx;
38264 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38265 if (op2 && GET_CODE (op2) == CONST_INT
38266 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38267 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38269 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38270 HOST_WIDE_INT value = INTVAL (op2) & mask;
38272 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38273 if (code == AND)
38275 if (value == 0)
38277 emit_insn (gen_rtx_SET (dest, const0_rtx));
38278 return;
38281 else if (value == mask)
38283 if (!rtx_equal_p (dest, op1))
38284 emit_insn (gen_rtx_SET (dest, op1));
38285 return;
38289 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38290 into separate ORI/ORIS or XORI/XORIS instructions. */
38291 else if (code == IOR || code == XOR)
38293 if (value == 0)
38295 if (!rtx_equal_p (dest, op1))
38296 emit_insn (gen_rtx_SET (dest, op1));
38297 return;
38302 if (code == AND && mode == SImode
38303 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38305 emit_insn (gen_andsi3 (dest, op1, op2));
38306 return;
38309 if (complement_op1_p)
38310 op1 = gen_rtx_NOT (mode, op1);
38312 if (complement_op2_p)
38313 op2 = gen_rtx_NOT (mode, op2);
38315 /* For canonical RTL, if only one arm is inverted it is the first. */
38316 if (!complement_op1_p && complement_op2_p)
38317 std::swap (op1, op2);
38319 bool_rtx = ((code == NOT)
38320 ? gen_rtx_NOT (mode, op1)
38321 : gen_rtx_fmt_ee (code, mode, op1, op2));
38323 if (complement_final_p)
38324 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38326 emit_insn (gen_rtx_SET (dest, bool_rtx));
38329 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38330 operations are split immediately during RTL generation to allow for more
38331 optimizations of the AND/IOR/XOR.
38333 OPERANDS is an array containing the destination and two input operands.
38334 CODE is the base operation (AND, IOR, XOR, NOT).
38336 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38337 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38338 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
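/* [Editorial sketch] A worked example of the split described above, for
   32-bit code:

     (set (reg:DI d) (ior:DI (reg:DI a) (const_int 0x12345678)))

   The high halves see IOR with 0, which becomes a plain move; the low
   constant is not a logical_const_operand, so it is split again:

     d.hi = a.hi
     tmp  = a.lo | 0x12340000    (oris)
     d.lo = tmp | 0x5678         (ori)                               */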
38342 static void
38343 rs6000_split_logical_di (rtx operands[3],
38344 enum rtx_code code,
38345 bool complement_final_p,
38346 bool complement_op1_p,
38347 bool complement_op2_p)
38349 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
38350 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
38351 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
38352 enum hi_lo { hi = 0, lo = 1 };
38353 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38354 size_t i;
38356 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38357 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38358 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38359 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38361 if (code == NOT)
38362 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38363 else
38365 if (GET_CODE (operands[2]) != CONST_INT)
38367 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38368 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38370 else
38372 HOST_WIDE_INT value = INTVAL (operands[2]);
38373 HOST_WIDE_INT value_hi_lo[2];
38375 gcc_assert (!complement_final_p);
38376 gcc_assert (!complement_op1_p);
38377 gcc_assert (!complement_op2_p);
38379 value_hi_lo[hi] = value >> 32;
38380 value_hi_lo[lo] = value & lower_32bits;
38382 for (i = 0; i < 2; i++)
38384 HOST_WIDE_INT sub_value = value_hi_lo[i];
38386 if (sub_value & sign_bit)
38387 sub_value |= upper_32bits;
38389 op2_hi_lo[i] = GEN_INT (sub_value);
38391 /* If this is an AND instruction, check to see if we need to load
38392 the value in a register. */
38393 if (code == AND && sub_value != -1 && sub_value != 0
38394 && !and_operand (op2_hi_lo[i], SImode))
38395 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38400 for (i = 0; i < 2; i++)
38402 /* Split large IOR/XOR operations. */
38403 if ((code == IOR || code == XOR)
38404 && GET_CODE (op2_hi_lo[i]) == CONST_INT
38405 && !complement_final_p
38406 && !complement_op1_p
38407 && !complement_op2_p
38408 && !logical_const_operand (op2_hi_lo[i], SImode))
38410 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38411 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
38412 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
38413 rtx tmp = gen_reg_rtx (SImode);
38415 /* Make sure the constant is sign extended. */
38416 if ((hi_16bits & sign_bit) != 0)
38417 hi_16bits |= upper_32bits;
38419 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38420 code, SImode, false, false, false);
38422 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38423 code, SImode, false, false, false);
38425 else
38426 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38427 code, SImode, complement_final_p,
38428 complement_op1_p, complement_op2_p);
38431 return;
38434 /* Split the insns that make up boolean operations operating on multiple GPR
38435 registers. The boolean MD patterns ensure that the inputs either are
38436 exactly the same as the output registers, or there is no overlap.
38438 OPERANDS is an array containing the destination and two input operands.
38439 CODE is the base operation (AND, IOR, XOR, NOT).
38440 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38441 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38442 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38444 void
38445 rs6000_split_logical (rtx operands[3],
38446 enum rtx_code code,
38447 bool complement_final_p,
38448 bool complement_op1_p,
38449 bool complement_op2_p)
38451 machine_mode mode = GET_MODE (operands[0]);
38452 machine_mode sub_mode;
38453 rtx op0, op1, op2;
38454 int sub_size, regno0, regno1, nregs, i;
38456 /* If this is DImode, use the specialized version that can run before
38457 register allocation. */
38458 if (mode == DImode && !TARGET_POWERPC64)
38460 rs6000_split_logical_di (operands, code, complement_final_p,
38461 complement_op1_p, complement_op2_p);
38462 return;
38465 op0 = operands[0];
38466 op1 = operands[1];
38467 op2 = (code == NOT) ? NULL_RTX : operands[2];
38468 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38469 sub_size = GET_MODE_SIZE (sub_mode);
38470 regno0 = REGNO (op0);
38471 regno1 = REGNO (op1);
38473 gcc_assert (reload_completed);
38474 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38475 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38477 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38478 gcc_assert (nregs > 1);
38480 if (op2 && REG_P (op2))
38481 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38483 for (i = 0; i < nregs; i++)
38485 int offset = i * sub_size;
38486 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38487 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38488 rtx sub_op2 = ((code == NOT)
38489 ? NULL_RTX
38490 : simplify_subreg (sub_mode, op2, mode, offset));
38492 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38493 complement_final_p, complement_op1_p,
38494 complement_op2_p);
38497 return;
38501 /* Return true if the peephole2 pass can combine an addis instruction with a
38502 load that uses an offset, so that the two instructions can be fused
38503 together on a power8. */
38505 bool
38506 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38507 rtx addis_value, /* addis value. */
38508 rtx target, /* target register that is loaded. */
38509 rtx mem) /* bottom part of the memory addr. */
38511 rtx addr;
38512 rtx base_reg;
38514 /* Validate arguments. */
38515 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38516 return false;
38518 if (!base_reg_operand (target, GET_MODE (target)))
38519 return false;
38521 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38522 return false;
38524 /* Allow sign/zero extension. */
38525 if (GET_CODE (mem) == ZERO_EXTEND
38526 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38527 mem = XEXP (mem, 0);
38529 if (!MEM_P (mem))
38530 return false;
38532 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38533 return false;
38535 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38536 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38537 return false;
38539 /* Validate that the register used to load the high value is either the
38540 register being loaded, or we can safely replace its use.
38542 This function is only called from the peephole2 pass and we assume that
38543 there are 2 instructions in the peephole (addis and load), so we want to
38544 check that the target register is not used in the memory address and that
38545 the register holding the addis result is dead after the peephole. */
38546 if (REGNO (addis_reg) != REGNO (target))
38548 if (reg_mentioned_p (target, mem))
38549 return false;
38551 if (!peep2_reg_dead_p (2, addis_reg))
38552 return false;
38554 /* If the target register being loaded is the stack pointer, we must
38555 avoid loading any other value into it, even temporarily. */
38556 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38557 return false;
38560 base_reg = XEXP (addr, 0);
38561 return REGNO (addis_reg) == REGNO (base_reg);
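/* A sketch of the insn pair this predicate accepts (symbol and registers are
   hypothetical):

	addis 9,2,sym@toc@ha
	lwz   10,sym@toc@l(9)

   which power8 can fuse provided r9 is dead after the load, or r9 and r10
   are the same register.  */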
38564 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38565 sequence. We adjust the addis register to use the target register. If the
38566 load sign extends, we emit a zero extending load instead, followed by an
38567 explicit sign extension, since the fusion hardware only covers zero
38568 extending loads.
38570 The operands are:
38571 operands[0] register set with addis (to be replaced with target)
38572 operands[1] value set via addis
38573 operands[2] target register being loaded
38574 operands[3] D-form memory reference using operands[0]. */
38576 void
38577 expand_fusion_gpr_load (rtx *operands)
38579 rtx addis_value = operands[1];
38580 rtx target = operands[2];
38581 rtx orig_mem = operands[3];
38582 rtx new_addr, new_mem, orig_addr, offset;
38583 enum rtx_code plus_or_lo_sum;
38584 machine_mode target_mode = GET_MODE (target);
38585 machine_mode extend_mode = target_mode;
38586 machine_mode ptr_mode = Pmode;
38587 enum rtx_code extend = UNKNOWN;
38589 if (GET_CODE (orig_mem) == ZERO_EXTEND
38590 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38592 extend = GET_CODE (orig_mem);
38593 orig_mem = XEXP (orig_mem, 0);
38594 target_mode = GET_MODE (orig_mem);
38597 gcc_assert (MEM_P (orig_mem));
38599 orig_addr = XEXP (orig_mem, 0);
38600 plus_or_lo_sum = GET_CODE (orig_addr);
38601 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38603 offset = XEXP (orig_addr, 1);
38604 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38605 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38607 if (extend != UNKNOWN)
38608 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38610 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38611 UNSPEC_FUSION_GPR);
38612 emit_insn (gen_rtx_SET (target, new_mem));
38614 if (extend == SIGN_EXTEND)
38616 int sub_off = ((BYTES_BIG_ENDIAN)
38617 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38618 : 0);
38619 rtx sign_reg
38620 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38622 emit_insn (gen_rtx_SET (target,
38623 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38626 return;
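/* Sketch of the rewrite for a sign extending HImode load (hypothetical
   operands): since the fusion only covers zero extension, the pair

	addis 9,2,sym@toc@ha
	lha   9,sym@toc@l(9)

   is expanded as a fused zero extending load plus an explicit extsh:

	addis 9,2,sym@toc@ha
	lhz   9,sym@toc@l(9)
	extsh 9,9  */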
38629 /* Emit the addis instruction that will be part of a fused instruction
38630 sequence. */
38632 void
38633 emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
38634 const char *mode_name)
38636 rtx fuse_ops[10];
38637 char insn_template[80];
38638 const char *addis_str = NULL;
38639 const char *comment_str = ASM_COMMENT_START;
38641 if (*comment_str == ' ')
38642 comment_str++;
38644 /* Emit the addis instruction. */
38645 fuse_ops[0] = target;
38646 if (satisfies_constraint_L (addis_value))
38648 fuse_ops[1] = addis_value;
38649 addis_str = "lis %0,%v1";
38652 else if (GET_CODE (addis_value) == PLUS)
38654 rtx op0 = XEXP (addis_value, 0);
38655 rtx op1 = XEXP (addis_value, 1);
38657 if (REG_P (op0) && CONST_INT_P (op1)
38658 && satisfies_constraint_L (op1))
38660 fuse_ops[1] = op0;
38661 fuse_ops[2] = op1;
38662 addis_str = "addis %0,%1,%v2";
38666 else if (GET_CODE (addis_value) == HIGH)
38668 rtx value = XEXP (addis_value, 0);
38669 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38671 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38672 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38673 if (TARGET_ELF)
38674 addis_str = "addis %0,%2,%1@toc@ha";
38676 else if (TARGET_XCOFF)
38677 addis_str = "addis %0,%1@u(%2)";
38679 else
38680 gcc_unreachable ();
38683 else if (GET_CODE (value) == PLUS)
38685 rtx op0 = XEXP (value, 0);
38686 rtx op1 = XEXP (value, 1);
38688 if (GET_CODE (op0) == UNSPEC
38689 && XINT (op0, 1) == UNSPEC_TOCREL
38690 && CONST_INT_P (op1))
38692 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38693 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38694 fuse_ops[3] = op1;
38695 if (TARGET_ELF)
38696 addis_str = "addis %0,%2,%1+%3@toc@ha";
38698 else if (TARGET_XCOFF)
38699 addis_str = "addis %0,%1+%3@u(%2)";
38701 else
38702 gcc_unreachable ();
38706 else if (satisfies_constraint_L (value))
38708 fuse_ops[1] = value;
38709 addis_str = "lis %0,%v1";
38712 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38714 fuse_ops[1] = value;
38715 addis_str = "lis %0,%1@ha";
38719 if (!addis_str)
38720 fatal_insn ("Could not generate addis value for fusion", addis_value);
38722 sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
38723 comment, mode_name);
38724 output_asm_insn (insn_template, fuse_ops);
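/* E.g. (hypothetical operands), for a TOC-relative high part on ELF this
   prints something like:

	addis 10,2,sym@toc@ha		# gpr load fusion, type int  */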
38727 /* Emit a D-form load or store instruction that is the second instruction
38728 of a fusion sequence. */
38730 void
38731 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
38732 const char *insn_str)
38734 rtx fuse_ops[10];
38735 char insn_template[80];
38737 fuse_ops[0] = load_store_reg;
38738 fuse_ops[1] = addis_reg;
38740 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38742 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38743 fuse_ops[2] = offset;
38744 output_asm_insn (insn_template, fuse_ops);
38747 else if (GET_CODE (offset) == UNSPEC
38748 && XINT (offset, 1) == UNSPEC_TOCREL)
38750 if (TARGET_ELF)
38751 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38753 else if (TARGET_XCOFF)
38754 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38756 else
38757 gcc_unreachable ();
38759 fuse_ops[2] = XVECEXP (offset, 0, 0);
38760 output_asm_insn (insn_template, fuse_ops);
38763 else if (GET_CODE (offset) == PLUS
38764 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38765 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38766 && CONST_INT_P (XEXP (offset, 1)))
38768 rtx tocrel_unspec = XEXP (offset, 0);
38769 if (TARGET_ELF)
38770 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38772 else if (TARGET_XCOFF)
38773 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38775 else
38776 gcc_unreachable ();
38778 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38779 fuse_ops[3] = XEXP (offset, 1);
38780 output_asm_insn (insn_template, fuse_ops);
38783 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38785 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38787 fuse_ops[2] = offset;
38788 output_asm_insn (insn_template, fuse_ops);
38791 else
38792 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38794 return;
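/* E.g. (hypothetical operands), with INSN_STR "lwz" and a TOC-relative
   low-part offset on ELF this prints:

	lwz 10,sym@toc@l(9)  */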
38797 /* Wrap a TOC address that can be fused to indicate that special fusion
38798 processing is needed. */
38800 static rtx
38801 fusion_wrap_memory_address (rtx old_mem)
38803 rtx old_addr = XEXP (old_mem, 0);
38804 rtvec v = gen_rtvec (1, old_addr);
38805 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
38806 return replace_equiv_address_nv (old_mem, new_addr, false);
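/* In other words (sketch): (mem (ADDR)) becomes
   (mem (unspec [(ADDR)] UNSPEC_FUSION_ADDIS)), leaving the rest of the MEM
   unchanged.  */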
38809 /* Given an address, convert it into the addis and load offset parts. Addresses
38810 created during the peephole2 process look like:
38811 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38812 (unspec [(...)] UNSPEC_TOCREL))
38814 Addresses created via toc fusion look like:
38815 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS) */
38817 static void
38818 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38820 rtx hi, lo;
38822 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
38824 lo = XVECEXP (addr, 0, 0);
38825 hi = gen_rtx_HIGH (Pmode, lo);
38827 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38829 hi = XEXP (addr, 0);
38830 lo = XEXP (addr, 1);
38832 else
38833 gcc_unreachable ();
38835 *p_hi = hi;
38836 *p_lo = lo;
38839 /* Return a string to fuse an addis instruction with a gpr load into the same
38840 register that the addis instruction set. The address that is used
38841 is the logical address that was formed during peephole2:
38842 (lo_sum (high) (low-part))
38844 Or the address is the TOC address that is wrapped before register allocation:
38845 (unspec [(addr)] UNSPEC_FUSION_ADDIS)
38847 The code is complicated, so we call output_asm_insn directly, and just
38848 return "". */
38850 const char *
38851 emit_fusion_gpr_load (rtx target, rtx mem)
38853 rtx addis_value;
38854 rtx addr;
38855 rtx load_offset;
38856 const char *load_str = NULL;
38857 const char *mode_name = NULL;
38858 machine_mode mode;
38860 if (GET_CODE (mem) == ZERO_EXTEND)
38861 mem = XEXP (mem, 0);
38863 gcc_assert (REG_P (target) && MEM_P (mem));
38865 addr = XEXP (mem, 0);
38866 fusion_split_address (addr, &addis_value, &load_offset);
38868 /* Now emit the load instruction to the same register. */
38869 mode = GET_MODE (mem);
38870 switch (mode)
38872 case E_QImode:
38873 mode_name = "char";
38874 load_str = "lbz";
38875 break;
38877 case E_HImode:
38878 mode_name = "short";
38879 load_str = "lhz";
38880 break;
38882 case E_SImode:
38883 case E_SFmode:
38884 mode_name = (mode == SFmode) ? "float" : "int";
38885 load_str = "lwz";
38886 break;
38888 case E_DImode:
38889 case E_DFmode:
38890 gcc_assert (TARGET_POWERPC64);
38891 mode_name = (mode == DFmode) ? "double" : "long";
38892 load_str = "ld";
38893 break;
38895 default:
38896 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38899 /* Emit the addis instruction. */
38900 emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);
38902 /* Emit the D-form load instruction. */
38903 emit_fusion_load_store (target, target, load_offset, load_str);
38905 return "";
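/* Taken together (hypothetical operands), for a SImode TOC reference the two
   helpers above print the fused pair:

	addis 9,2,sym@toc@ha		# gpr load fusion, type int
	lwz   9,sym@toc@l(9)  */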
38909 /* Return true if the peephole2 pass can combine an addis instruction with a
38910 load or store whose base is the addis result. This form of fusion was
38911 added in the ISA 3.0 (power9) hardware. */
38913 bool
38914 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38915 rtx addis_value, /* addis value. */
38916 rtx dest, /* destination (memory or register). */
38917 rtx src) /* source (register or memory). */
38919 rtx addr, mem, offset;
38920 machine_mode mode = GET_MODE (src);
38922 /* Validate arguments. */
38923 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38924 return false;
38926 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38927 return false;
38929 /* Ignore extend operations that are part of the load. */
38930 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
38931 src = XEXP (src, 0);
38933 /* Test for memory<-register or register<-memory. */
38934 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
38936 if (!MEM_P (dest))
38937 return false;
38939 mem = dest;
38942 else if (MEM_P (src))
38944 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
38945 return false;
38947 mem = src;
38950 else
38951 return false;
38953 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38954 if (GET_CODE (addr) == PLUS)
38956 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38957 return false;
38959 return satisfies_constraint_I (XEXP (addr, 1));
38962 else if (GET_CODE (addr) == LO_SUM)
38964 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38965 return false;
38967 offset = XEXP (addr, 1);
38968 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
38969 return small_toc_ref (offset, GET_MODE (offset));
38971 else if (TARGET_ELF && !TARGET_POWERPC64)
38972 return CONSTANT_P (offset);
38975 return false;
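/* A sketch of a PLUS-style pair this accepts (hypothetical registers):

	(set (reg:DI 9) (plus:DI (reg:DI 1) (const_int 65536)))	; addis 9,1,1
	(set (reg:DF 33) (mem:DF (plus:DI (reg:DI 9) (const_int 8))))	; lfd 1,8(9)

   i.e. an addis followed by a D-form memory access whose base is the addis
   result and whose offset satisfies the 16-bit `I' constraint.  */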
38978 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38979 load sequence.
38981 The operands are:
38982 operands[0] register set with addis
38983 operands[1] value set via addis
38984 operands[2] target register being loaded
38985 operands[3] D-form memory reference using operands[0].
38987 This is similar to the fusion introduced with power8, except it applies to
38988 both loads and stores and does not require the result register to be the
38989 same as the base register. At the moment, we only do this if the register
38990 set by the addis is dead. */
38992 void
38993 expand_fusion_p9_load (rtx *operands)
38995 rtx tmp_reg = operands[0];
38996 rtx addis_value = operands[1];
38997 rtx target = operands[2];
38998 rtx orig_mem = operands[3];
38999 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
39000 enum rtx_code plus_or_lo_sum;
39001 machine_mode target_mode = GET_MODE (target);
39002 machine_mode extend_mode = target_mode;
39003 machine_mode ptr_mode = Pmode;
39004 enum rtx_code extend = UNKNOWN;
39006 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
39008 extend = GET_CODE (orig_mem);
39009 orig_mem = XEXP (orig_mem, 0);
39010 target_mode = GET_MODE (orig_mem);
39013 gcc_assert (MEM_P (orig_mem));
39015 orig_addr = XEXP (orig_mem, 0);
39016 plus_or_lo_sum = GET_CODE (orig_addr);
39017 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
39019 offset = XEXP (orig_addr, 1);
39020 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
39021 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
39023 if (extend != UNKNOWN)
39024 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
39026 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
39027 UNSPEC_FUSION_P9);
39029 set = gen_rtx_SET (target, new_mem);
39030 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
39031 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
39032 emit_insn (insn);
39034 return;
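/* Sketch of the emitted RTL (modes and registers are illustrative): the load
   is wrapped in UNSPEC_FUSION_P9 and the addis scratch register is clobbered:

	(parallel [(set (reg:DF 33)
			(unspec:DF [(mem:DF (lo_sum (reg:DI 9) (offset)))]
				   UNSPEC_FUSION_P9))
		   (clobber (reg:DI 9))])  */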
39037 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
39038 store sequence.
39040 The operands are:
39041 operands[0] register set with addis
39042 operands[1] value set via addis
39043 operands[2] target D-form memory being stored to
39044 operands[3] register being stored
39046 This is similar to the fusion introduced with power8, except it applies to
39047 both loads and stores and does not require the result register to be the
39048 same as the base register. At the moment, we only do this if the register
39049 set by the addis is dead. */
39051 void
39052 expand_fusion_p9_store (rtx *operands)
39054 rtx tmp_reg = operands[0];
39055 rtx addis_value = operands[1];
39056 rtx orig_mem = operands[2];
39057 rtx src = operands[3];
39058 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
39059 enum rtx_code plus_or_lo_sum;
39060 machine_mode target_mode = GET_MODE (orig_mem);
39061 machine_mode ptr_mode = Pmode;
39063 gcc_assert (MEM_P (orig_mem));
39065 orig_addr = XEXP (orig_mem, 0);
39066 plus_or_lo_sum = GET_CODE (orig_addr);
39067 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
39069 offset = XEXP (orig_addr, 1);
39070 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
39071 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
39073 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
39074 UNSPEC_FUSION_P9);
39076 set = gen_rtx_SET (new_mem, new_src);
39077 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
39078 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
39079 emit_insn (insn);
39081 return;
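/* The store analog (sketch): here the source value is wrapped instead:

	(parallel [(set (mem:DF (lo_sum (reg:DI 9) (offset)))
			(unspec:DF [(reg:DF 33)] UNSPEC_FUSION_P9))
		   (clobber (reg:DI 9))])  */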
39084 /* Return a string to fuse an addis instruction with a load using extended
39085 fusion. The address that is used is the logical address that was formed
39086 during peephole2: (lo_sum (high) (low-part))
39088 The code is complicated, so we call output_asm_insn directly, and just
39089 return "". */
39091 const char *
39092 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
39094 machine_mode mode = GET_MODE (reg);
39095 rtx hi;
39096 rtx lo;
39097 rtx addr;
39098 const char *load_string;
39099 int r;
39101 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
39103 mem = XEXP (mem, 0);
39104 mode = GET_MODE (mem);
39107 if (GET_CODE (reg) == SUBREG)
39109 gcc_assert (SUBREG_BYTE (reg) == 0);
39110 reg = SUBREG_REG (reg);
39113 if (!REG_P (reg))
39114 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
39116 r = REGNO (reg);
39117 if (FP_REGNO_P (r))
39119 if (mode == SFmode)
39120 load_string = "lfs";
39121 else if (mode == DFmode || mode == DImode)
39122 load_string = "lfd";
39123 else
39124 gcc_unreachable ();
39126 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
39128 if (mode == SFmode)
39129 load_string = "lxssp";
39130 else if (mode == DFmode || mode == DImode)
39131 load_string = "lxsd";
39132 else
39133 gcc_unreachable ();
39135 else if (INT_REGNO_P (r))
39137 switch (mode)
39139 case E_QImode:
39140 load_string = "lbz";
39141 break;
39142 case E_HImode:
39143 load_string = "lhz";
39144 break;
39145 case E_SImode:
39146 case E_SFmode:
39147 load_string = "lwz";
39148 break;
39149 case E_DImode:
39150 case E_DFmode:
39151 if (!TARGET_POWERPC64)
39152 gcc_unreachable ();
39153 load_string = "ld";
39154 break;
39155 default:
39156 gcc_unreachable ();
39159 else
39160 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
39162 if (!MEM_P (mem))
39163 fatal_insn ("emit_fusion_p9_load not MEM", mem);
39165 addr = XEXP (mem, 0);
39166 fusion_split_address (addr, &hi, &lo);
39168 /* Emit the addis instruction. */
39169 emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));
39171 /* Emit the D-form load instruction. */
39172 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
39174 return "";
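/* E.g. (hypothetical operands), an SFmode load into f1 with scratch r9
   prints:

	addis 9,2,x@toc@ha		# power9 load fusion, type SF
	lfs   1,x@toc@l(9)  */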
39177 /* Return a string to fuse an addis instruction with a store using extended
39178 fusion. The address that is used is the logical address that was formed
39179 during peephole2: (lo_sum (high) (low-part))
39181 The code is complicated, so we call output_asm_insn directly, and just
39182 return "". */
39184 const char *
39185 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
39187 machine_mode mode = GET_MODE (reg);
39188 rtx hi;
39189 rtx lo;
39190 rtx addr;
39191 const char *store_string;
39192 int r;
39194 if (GET_CODE (reg) == SUBREG)
39196 gcc_assert (SUBREG_BYTE (reg) == 0);
39197 reg = SUBREG_REG (reg);
39200 if (!REG_P (reg))
39201 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
39203 r = REGNO (reg);
39204 if (FP_REGNO_P (r))
39206 if (mode == SFmode)
39207 store_string = "stfs";
39208 else if (mode == DFmode)
39209 store_string = "stfd";
39210 else
39211 gcc_unreachable ();
39213 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
39215 if (mode == SFmode)
39216 store_string = "stxssp";
39217 else if (mode == DFmode || mode == DImode)
39218 store_string = "stxsd";
39219 else
39220 gcc_unreachable ();
39222 else if (INT_REGNO_P (r))
39224 switch (mode)
39226 case E_QImode:
39227 store_string = "stb";
39228 break;
39229 case E_HImode:
39230 store_string = "sth";
39231 break;
39232 case E_SImode:
39233 case E_SFmode:
39234 store_string = "stw";
39235 break;
39236 case E_DImode:
39237 case E_DFmode:
39238 if (!TARGET_POWERPC64)
39239 gcc_unreachable ();
39240 store_string = "std";
39241 break;
39242 default:
39243 gcc_unreachable ();
39246 else
39247 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
39249 if (!MEM_P (mem))
39250 fatal_insn ("emit_fusion_p9_store not MEM", mem);
39252 addr = XEXP (mem, 0);
39253 fusion_split_address (addr, &hi, &lo);
39255 /* Emit the addis instruction. */
39256 emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));
39258 /* Emit the D-form store instruction. */
39259 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
39261 return "";
39264 #ifdef RS6000_GLIBC_ATOMIC_FENV
39265 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
39266 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
39267 #endif
39269 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
39271 static void
39272 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
39274 if (!TARGET_HARD_FLOAT)
39276 #ifdef RS6000_GLIBC_ATOMIC_FENV
39277 if (atomic_hold_decl == NULL_TREE)
39279 atomic_hold_decl
39280 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39281 get_identifier ("__atomic_feholdexcept"),
39282 build_function_type_list (void_type_node,
39283 double_ptr_type_node,
39284 NULL_TREE));
39285 TREE_PUBLIC (atomic_hold_decl) = 1;
39286 DECL_EXTERNAL (atomic_hold_decl) = 1;
39289 if (atomic_clear_decl == NULL_TREE)
39291 atomic_clear_decl
39292 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39293 get_identifier ("__atomic_feclearexcept"),
39294 build_function_type_list (void_type_node,
39295 NULL_TREE));
39296 TREE_PUBLIC (atomic_clear_decl) = 1;
39297 DECL_EXTERNAL (atomic_clear_decl) = 1;
39300 tree const_double = build_qualified_type (double_type_node,
39301 TYPE_QUAL_CONST);
39302 tree const_double_ptr = build_pointer_type (const_double);
39303 if (atomic_update_decl == NULL_TREE)
39305 atomic_update_decl
39306 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39307 get_identifier ("__atomic_feupdateenv"),
39308 build_function_type_list (void_type_node,
39309 const_double_ptr,
39310 NULL_TREE));
39311 TREE_PUBLIC (atomic_update_decl) = 1;
39312 DECL_EXTERNAL (atomic_update_decl) = 1;
39315 tree fenv_var = create_tmp_var_raw (double_type_node);
39316 TREE_ADDRESSABLE (fenv_var) = 1;
39317 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
39319 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
39320 *clear = build_call_expr (atomic_clear_decl, 0);
39321 *update = build_call_expr (atomic_update_decl, 1,
39322 fold_convert (const_double_ptr, fenv_addr));
39323 #endif
39324 return;
39327 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
39328 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
39329 tree call_mffs = build_call_expr (mffs, 0);
39331 /* Generates the equivalent of feholdexcept (&fenv_var)
39333 fenv_var = __builtin_mffs ();
39334 double fenv_hold;
39335 *(uint64_t*)&fenv_hold = *(uint64_t*)&fenv_var & 0xffffffff00000007LL;
39336 __builtin_mtfsf (0xff, fenv_hold); */
39338 /* Mask to clear everything except for the rounding modes and non-IEEE
39339 arithmetic flag. */
39340 const unsigned HOST_WIDE_INT hold_exception_mask =
39341 HOST_WIDE_INT_C (0xffffffff00000007);
39343 tree fenv_var = create_tmp_var_raw (double_type_node);
39345 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
39347 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
39348 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39349 build_int_cst (uint64_type_node,
39350 hold_exception_mask));
39352 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39353 fenv_llu_and);
39355 tree hold_mtfsf = build_call_expr (mtfsf, 2,
39356 build_int_cst (unsigned_type_node, 0xff),
39357 fenv_hold_mtfsf);
39359 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
39361 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
39363 double fenv_clear = __builtin_mffs ();
39364 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
39365 __builtin_mtfsf (0xff, fenv_clear); */
39367 /* Mask to clear the entire lower FPSCR word, i.e. all of the exception
39368 flags and enable bits. */
39369 const unsigned HOST_WIDE_INT clear_exception_mask =
39370 HOST_WIDE_INT_C (0xffffffff00000000);
39372 tree fenv_clear = create_tmp_var_raw (double_type_node);
39374 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
39376 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
39377 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
39378 fenv_clean_llu,
39379 build_int_cst (uint64_type_node,
39380 clear_exception_mask));
39382 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39383 fenv_clear_llu_and);
39385 tree clear_mtfsf = build_call_expr (mtfsf, 2,
39386 build_int_cst (unsigned_type_node, 0xff),
39387 fenv_clear_mtfsf);
39389 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
39391 /* Generates the equivalent of feupdateenv (&fenv_var)
39393 double old_fenv = __builtin_mffs ();
39394 double fenv_update;
39395 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
39396 (*(uint64_t*)&fenv_var & 0x1ff80fff);
39397 __builtin_mtfsf (0xff, fenv_update); */
39399 const unsigned HOST_WIDE_INT update_exception_mask =
39400 HOST_WIDE_INT_C (0xffffffff1fffff00);
39401 const unsigned HOST_WIDE_INT new_exception_mask =
39402 HOST_WIDE_INT_C (0x1ff80fff);
39404 tree old_fenv = create_tmp_var_raw (double_type_node);
39405 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39407 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39408 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39409 build_int_cst (uint64_type_node,
39410 update_exception_mask));
39412 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39413 build_int_cst (uint64_type_node,
39414 new_exception_mask));
39416 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39417 old_llu_and, new_llu_and);
39419 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39420 new_llu_mask);
39422 tree update_mtfsf = build_call_expr (mtfsf, 2,
39423 build_int_cst (unsigned_type_node, 0xff),
39424 fenv_update_mtfsf);
39426 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
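/* These three sequences are what the middle end wraps around a C11 atomic
   floating point compound assignment (a rough sketch, not target code):

	_Atomic double d;
	d += 1.0;

   expands to roughly: HOLD; retry: load d, add 1.0, compare-and-swap;
   on failure CLEAR and goto retry; on success UPDATE.  */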
39429 void
39430 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39432 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39434 rtx_tmp0 = gen_reg_rtx (V2DImode);
39435 rtx_tmp1 = gen_reg_rtx (V2DImode);
39437 /* The destination vector layout after the vmrgew instruction is:
39438 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39439 Setup rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39440 vmrgew instruction will be correct. */
39441 if (VECTOR_ELT_ORDER_BIG)
39443 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39444 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39446 else
39448 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39449 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39452 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39453 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39455 if (signed_convert)
39457 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39458 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39460 else
39462 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39463 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39466 if (VECTOR_ELT_ORDER_BIG)
39467 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39468 else
39469 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
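/* Net effect (sketch): dst = { (float) src1[0], (float) src1[1],
				(float) src2[0], (float) src2[1] }; the
   xxpermdi/vmrgew shuffles exist to get that element order on both
   endiannesses.  */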
39472 void
39473 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39474 rtx src2)
39476 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39478 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39479 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39481 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39482 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39484 rtx_tmp2 = gen_reg_rtx (V4SImode);
39485 rtx_tmp3 = gen_reg_rtx (V4SImode);
39487 if (signed_convert)
39489 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39490 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39492 else
39494 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39495 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39498 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
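/* Net effect (sketch): dst = { (int) src1[0], (int) src1[1],
				(int) src2[0], (int) src2[1] }, signed or
   unsigned according to SIGNED_CONVERT.  */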
39501 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39503 static bool
39504 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39505 optimization_type opt_type)
39507 switch (op)
39509 case rsqrt_optab:
39510 return (opt_type == OPTIMIZE_FOR_SPEED
39511 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39513 default:
39514 return true;
39518 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39520 static HOST_WIDE_INT
39521 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39523 if (TREE_CODE (exp) == STRING_CST
39524 && (STRICT_ALIGNMENT || !optimize_size))
39525 return MAX (align, BITS_PER_WORD);
39526 return align;
39529 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39531 static HOST_WIDE_INT
39532 rs6000_starting_frame_offset (void)
39534 if (FRAME_GROWS_DOWNWARD)
39535 return 0;
39536 return RS6000_STARTING_FRAME_OFFSET;
39539 struct gcc_target targetm = TARGET_INITIALIZER;
39541 #include "gt-rs6000.h"