/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;        /* stack info won't change from here on */
  int first_gp_reg_save;       /* first callee saved GP register used */
  int first_fp_reg_save;       /* first callee saved FP register used */
  int first_altivec_reg_save;  /* first callee saved AltiVec register used */
  int lr_save_p;               /* true if the link reg needs to be saved */
  int cr_save_p;               /* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;    /* mask of vec registers to save */
  int push_p;                  /* true if we need to allocate stack space */
  int calls_p;                 /* true if the function makes any calls */
  int world_save_p;            /* true if we're saving *everything*:
                                  r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;         /* which ABI to use */
  int gp_save_offset;          /* offset to save GP regs from initial SP */
  int fp_save_offset;          /* offset to save FP regs from initial SP */
  int altivec_save_offset;     /* offset to save AltiVec regs from initial SP */
  int lr_save_offset;          /* offset to save LR from initial SP */
  int cr_save_offset;          /* offset to save CR from initial SP */
  int vrsave_save_offset;      /* offset to save VRSAVE from initial SP */
  int varargs_save_offset;     /* offset to save the varargs registers */
  int ehrd_offset;             /* offset to EH return data */
  int ehcr_offset;             /* offset to EH CR field data */
  int reg_size;                /* register size (4 or 8) */
  HOST_WIDE_INT vars_size;     /* variable save area size */
  int parm_size;               /* outgoing parameter size */
  int save_size;               /* save area size */
  int fixed_size;              /* fixed size of stack frame */
  int gp_size;                 /* size of saved GP registers */
  int fp_size;                 /* size of saved FP registers */
  int altivec_size;            /* size of saved AltiVec registers */
  int cr_size;                 /* size to hold CR if not in fixed area */
  int vrsave_size;             /* size to hold VRSAVE */
  int altivec_padding_size;    /* size of altivec alignment padding */
  HOST_WIDE_INT total_size;    /* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, to call so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;
#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,   /* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,   /* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;	/* option name */
  unsigned int mask;	/* mask bits to set */
} recip_options[] = {
  { "all",    RECIP_ALL },
  { "none",   RECIP_NONE },
  { "div",    (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
	       | RECIP_V2DF_DIV) },
  { "divf",   (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",   (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",  (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
	       | RECIP_V2DF_RSQRT) },
  { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
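/* Illustrative note (not from the original source): the -mrecip= handling in
   this file looks each comma-separated name up in the table above and ORs the
   masks together, so -mrecip=divf,rsqrtd would enable
   RECIP_SF_DIV | RECIP_V4SF_DIV | RECIP_DF_RSQRT | RECIP_V2DF_RSQRT.  */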
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
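/* Usage sketch (user code, for illustration only; not part of this file):

     if (__builtin_cpu_is ("power8"))
       do_power8_path ();   /* hypothetical user function */

   The builtin compares the AT_PLATFORM value that a new-enough C library
   caches in the TCB against the cpuid value recorded above.  */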
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",	 PPC_FEATURE_HAS_4xxMAC,	0 },
  { "altivec",	 PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05", PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06", PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",	 PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",	 PPC_FEATURE_BOOKE,		0 },
  { "cellbe",	 PPC_FEATURE_CELL_BE,		0 },
  { "dfp",	 PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble", PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle", PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",	 PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",	 PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",	 PPC_FEATURE_HAS_MMU,		0 },
  { "notb",	 PPC_FEATURE_NO_TB,		0 },
  { "pa6t",	 PPC_FEATURE_PA6T,		0 },
  { "power4",	 PPC_FEATURE_POWER4,		0 },
  { "power5",	 PPC_FEATURE_POWER5,		0 },
  { "power5+",	 PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",	 PPC_FEATURE_POWER6_EXT,	0 },
  { "ppc32",	 PPC_FEATURE_32,		0 },
  { "ppc601",	 PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",	 PPC_FEATURE_64,		0 },
  { "ppcle",	 PPC_FEATURE_PPC_LE,		0 },
  { "smt",	 PPC_FEATURE_SMT,		0 },
  { "spe",	 PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",	 PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",	 PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",	 PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07", PPC_FEATURE2_ARCH_2_07,	1 },
  { "dscr",	 PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",	 PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",	 PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",	 PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",	 PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",	 PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",	 PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00", PPC_FEATURE2_ARCH_3_00,	1 },
  { "ieee128",	 PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",	 PPC_FEATURE2_DARN,		1 },
  { "scv",	 PPC_FEATURE2_SCV,		1 }
};
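/* Usage sketch (user code, for illustration only):

     if (__builtin_cpu_supports ("vsx"))
       ...

   Entries with id == 0 are tested against the AT_HWCAP word and entries with
   id == 1 against AT_HWCAP2, both read from the TCB cache mentioned below.  */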
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,	/* default clone.  */
  CLONE_ISA_2_05,	/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,	/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,	/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,	/* ISA 3.00 (power9).  */
  CLONE_MAX
};
/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0, "" },					/* Default options.  */
  { OPTION_MASK_CMPB, "arch_2_05" },		/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD, "arch_2_06" },		/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR, "arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR, "arch_3_00" },	/* ISA 3.00 (power9).  */
};
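/* Illustration (not from the original source; names hypothetical): the table
   above backs function multi-versioning such as

     __attribute__ ((target_clones ("cpu=power9", "cpu=power8", "default")))
     long mod_func (long a, long b);

   where the generated ifunc resolver uses __builtin_cpu_supports with the
   HWCAP names listed here to pick the best available clone at load time.  */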
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,	/* General purpose registers.  */
  RELOAD_REG_FPR,	/* Traditional floating point regs.  */
  RELOAD_REG_VMX,	/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,	/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX
/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;	/* Register class name.  */
  int reg;		/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr", FIRST_GPR_REGNO },		/* RELOAD_REG_GPR.  */
  { "Fpr", FIRST_FPR_REGNO },		/* RELOAD_REG_FPR.  */
  { "VMX", FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any", -1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two cases.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
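/* Illustration (not from the original source): a typical addr_mask entry for
   DImode in a GPR on a 64-bit target would be roughly
   (RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET
    | RELOAD_REG_PRE_INCDEC | RELOAD_REG_PRE_MODIFY),
   i.e. the ld/ldx forms plus the ldu/ldux update forms are all usable.  */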
/* For each mode, the reload insns to use and the masks of valid addressing
   modes, broken out by register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;	 /* INSN to reload for loading.  */
  enum insn_code reload_store;	 /* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr; /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx; /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr; /* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;	 /* INSN for fusing gpr ADDIS/loads.  */
  /* INSNs for fusing addi with loads or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
  /* INSNs for fusing addis with loads or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;		 /* Scalar value can go in VMX.  */
  bool fused_toc;		 /* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
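/* For illustration (not from the original source), the RTL shapes these two
   predicates guard look like:

     (mem:SI (pre_inc (reg R)))                            ; lwzu-style update
     (mem:SI (pre_modify (reg R) (plus (reg R) (reg S))))  ; lwzux-style update

   Both leave the updated address in R as a side effect.  */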
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
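/* Illustration (not from the original source): the ISA 3.0 lxv/stxv
   instructions use a DQ-form displacement, so vector accesses such as 16(r3)
   or 32(r3) satisfy the quad-offset restriction while 8(r3) does not and must
   be reloaded; RELOAD_REG_QUAD_OFFSET above tracks exactly this.  */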
/* Processor costs (relative to an add) */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,	/* mulsi */
  COSTS_N_INSNS (6/2),		/* mulsi_const */
  COSTS_N_INSNS (6/2),		/* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,	/* muldi */
  COSTS_N_INSNS (38/2),		/* divsi */
  COSTS_N_INSNS (70/2),		/* divdi */
  COSTS_N_INSNS (10/2),		/* fp */
  COSTS_N_INSNS (10/2),		/* dmul */
  COSTS_N_INSNS (74/2),		/* sdiv */
  COSTS_N_INSNS (74/2),		/* ddiv */
  128,				/* cache line size */
  32,				/* l1 cache */
  512,				/* l2 cache */
  6,				/* streams */
  0,				/* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
1181 static const
1182 struct processor_costs power7_cost = {
1183 COSTS_N_INSNS (2), /* mulsi */
1184 COSTS_N_INSNS (2), /* mulsi_const */
1185 COSTS_N_INSNS (2), /* mulsi_const9 */
1186 COSTS_N_INSNS (2), /* muldi */
1187 COSTS_N_INSNS (18), /* divsi */
1188 COSTS_N_INSNS (34), /* divdi */
1189 COSTS_N_INSNS (3), /* fp */
1190 COSTS_N_INSNS (3), /* dmul */
1191 COSTS_N_INSNS (13), /* sdiv */
1192 COSTS_N_INSNS (16), /* ddiv */
1193 128, /* cache line size */
1194 32, /* l1 cache */
1195 256, /* l2 cache */
1196 12, /* prefetch streams */
1197 COSTS_N_INSNS (3), /* SF->DF convert */
1200 /* Instruction costs on POWER8 processors. */
1201 static const
1202 struct processor_costs power8_cost = {
1203 COSTS_N_INSNS (3), /* mulsi */
1204 COSTS_N_INSNS (3), /* mulsi_const */
1205 COSTS_N_INSNS (3), /* mulsi_const9 */
1206 COSTS_N_INSNS (3), /* muldi */
1207 COSTS_N_INSNS (19), /* divsi */
1208 COSTS_N_INSNS (35), /* divdi */
1209 COSTS_N_INSNS (3), /* fp */
1210 COSTS_N_INSNS (3), /* dmul */
1211 COSTS_N_INSNS (14), /* sdiv */
1212 COSTS_N_INSNS (17), /* ddiv */
1213 128, /* cache line size */
1214 32, /* l1 cache */
1215 256, /* l2 cache */
1216 12, /* prefetch streams */
1217 COSTS_N_INSNS (3), /* SF->DF convert */
1220 /* Instruction costs on POWER9 processors. */
1221 static const
1222 struct processor_costs power9_cost = {
1223 COSTS_N_INSNS (3), /* mulsi */
1224 COSTS_N_INSNS (3), /* mulsi_const */
1225 COSTS_N_INSNS (3), /* mulsi_const9 */
1226 COSTS_N_INSNS (3), /* muldi */
1227 COSTS_N_INSNS (8), /* divsi */
1228 COSTS_N_INSNS (12), /* divdi */
1229 COSTS_N_INSNS (3), /* fp */
1230 COSTS_N_INSNS (3), /* dmul */
1231 COSTS_N_INSNS (13), /* sdiv */
1232 COSTS_N_INSNS (18), /* ddiv */
1233 128, /* cache line size */
1234 32, /* l1 cache */
1235 512, /* l2 cache */
1236 8, /* prefetch streams */
1237 COSTS_N_INSNS (3), /* SF->DF convert */
1240 /* Instruction costs on POWER A2 processors. */
1241 static const
1242 struct processor_costs ppca2_cost = {
1243 COSTS_N_INSNS (16), /* mulsi */
1244 COSTS_N_INSNS (16), /* mulsi_const */
1245 COSTS_N_INSNS (16), /* mulsi_const9 */
1246 COSTS_N_INSNS (16), /* muldi */
1247 COSTS_N_INSNS (22), /* divsi */
1248 COSTS_N_INSNS (28), /* divdi */
1249 COSTS_N_INSNS (3), /* fp */
1250 COSTS_N_INSNS (3), /* dmul */
1251 COSTS_N_INSNS (59), /* sdiv */
1252 COSTS_N_INSNS (72), /* ddiv */
1254 16, /* l1 cache */
1255 2048, /* l2 cache */
1256 16, /* prefetch streams */
1257 0, /* SF->DF convert */
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X
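/* The table above is built with the classic X-macro idiom: each
   RS6000_BUILTIN_<kind> macro is redefined to expand to one initializer row,
   rs6000-builtin.def is included so every builtin it lists becomes an entry,
   and the macros are then #undef'd again.  Sketch with a hypothetical entry:

     RS6000_BUILTIN_2 (FOO, "__builtin_foo", MASK, ATTR, CODE_FOR_foo)
       expands to:  { "__builtin_foo", CODE_FOR_foo, MASK, ATTR },  */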
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);


static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];	  /* return value + 3 arguments.  */
  unsigned char uns_p[4]; /* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};

#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif
/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
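/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (the %v0
   bit) and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001, matching
   the hardware layout of the VRSAVE register.  */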
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_FOLD_BUILTIN
1705 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1706 #undef TARGET_GIMPLE_FOLD_BUILTIN
1707 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1709 #undef TARGET_EXPAND_BUILTIN
1710 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1712 #undef TARGET_MANGLE_TYPE
1713 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1715 #undef TARGET_INIT_LIBFUNCS
1716 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1718 #if TARGET_MACHO
1719 #undef TARGET_BINDS_LOCAL_P
1720 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1721 #endif
1723 #undef TARGET_MS_BITFIELD_LAYOUT_P
1724 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1726 #undef TARGET_ASM_OUTPUT_MI_THUNK
1727 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1729 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1730 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1732 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1733 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1735 #undef TARGET_REGISTER_MOVE_COST
1736 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1737 #undef TARGET_MEMORY_MOVE_COST
1738 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1739 #undef TARGET_CANNOT_COPY_INSN_P
1740 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1741 #undef TARGET_RTX_COSTS
1742 #define TARGET_RTX_COSTS rs6000_rtx_costs
1743 #undef TARGET_ADDRESS_COST
1744 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1745 #undef TARGET_INSN_COST
1746 #define TARGET_INSN_COST rs6000_insn_cost
1748 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1749 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1751 #undef TARGET_PROMOTE_FUNCTION_MODE
1752 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1754 #undef TARGET_RETURN_IN_MEMORY
1755 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1757 #undef TARGET_RETURN_IN_MSB
1758 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1760 #undef TARGET_SETUP_INCOMING_VARARGS
1761 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1763 /* Always strict argument naming on rs6000. */
1764 #undef TARGET_STRICT_ARGUMENT_NAMING
1765 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1766 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1767 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1768 #undef TARGET_SPLIT_COMPLEX_ARG
1769 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1770 #undef TARGET_MUST_PASS_IN_STACK
1771 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1772 #undef TARGET_PASS_BY_REFERENCE
1773 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1774 #undef TARGET_ARG_PARTIAL_BYTES
1775 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1776 #undef TARGET_FUNCTION_ARG_ADVANCE
1777 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1778 #undef TARGET_FUNCTION_ARG
1779 #define TARGET_FUNCTION_ARG rs6000_function_arg
1780 #undef TARGET_FUNCTION_ARG_PADDING
1781 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1782 #undef TARGET_FUNCTION_ARG_BOUNDARY
1783 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1785 #undef TARGET_BUILD_BUILTIN_VA_LIST
1786 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1788 #undef TARGET_EXPAND_BUILTIN_VA_START
1789 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1791 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1792 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1794 #undef TARGET_EH_RETURN_FILTER_MODE
1795 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1797 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1798 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1800 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1801 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1803 #undef TARGET_FLOATN_MODE
1804 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1806 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1807 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1809 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1810 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1812 #undef TARGET_MD_ASM_ADJUST
1813 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1815 #undef TARGET_OPTION_OVERRIDE
1816 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1818 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1819 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1820 rs6000_builtin_vectorized_function
1822 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1823 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1824 rs6000_builtin_md_vectorized_function
1826 #undef TARGET_STACK_PROTECT_GUARD
1827 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1829 #if !TARGET_MACHO
1830 #undef TARGET_STACK_PROTECT_FAIL
1831 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1832 #endif
1834 #ifdef HAVE_AS_TLS
1835 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1836 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1837 #endif
1839 /* Use a 32-bit anchor range. This leads to sequences like:
1841 addis tmp,anchor,high
1842 add dest,tmp,low
1844 where tmp itself acts as an anchor, and can be shared between
1845 accesses to the same 64k page. */
1846 #undef TARGET_MIN_ANCHOR_OFFSET
1847 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1848 #undef TARGET_MAX_ANCHOR_OFFSET
1849 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
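/* The minimum offset is written as -0x7fffffff - 1 rather than -0x80000000
   because the literal 0x80000000 does not fit in a signed 32-bit int;
   negating it would act on a constant of unsigned (or wider) type instead
   of yielding the intended signed minimum.  */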
1850 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1851 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1852 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1853 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1855 #undef TARGET_BUILTIN_RECIPROCAL
1856 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1858 #undef TARGET_SECONDARY_RELOAD
1859 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1860 #undef TARGET_SECONDARY_MEMORY_NEEDED
1861 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1862 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1863 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1865 #undef TARGET_LEGITIMATE_ADDRESS_P
1866 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1868 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1869 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1871 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1872 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1874 #undef TARGET_CAN_ELIMINATE
1875 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1877 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1878 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1880 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1881 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1883 #undef TARGET_TRAMPOLINE_INIT
1884 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1886 #undef TARGET_FUNCTION_VALUE
1887 #define TARGET_FUNCTION_VALUE rs6000_function_value
1889 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1890 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1892 #undef TARGET_OPTION_SAVE
1893 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1895 #undef TARGET_OPTION_RESTORE
1896 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1898 #undef TARGET_OPTION_PRINT
1899 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1901 #undef TARGET_CAN_INLINE_P
1902 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1904 #undef TARGET_SET_CURRENT_FUNCTION
1905 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1907 #undef TARGET_LEGITIMATE_CONSTANT_P
1908 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1910 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1911 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1913 #undef TARGET_CAN_USE_DOLOOP_P
1914 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1916 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1917 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1919 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1920 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1921 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1922 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1923 #undef TARGET_UNWIND_WORD_MODE
1924 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1926 #undef TARGET_OFFLOAD_OPTIONS
1927 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1929 #undef TARGET_C_MODE_FOR_SUFFIX
1930 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1932 #undef TARGET_INVALID_BINARY_OP
1933 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1935 #undef TARGET_OPTAB_SUPPORTED_P
1936 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1938 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1939 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1941 #undef TARGET_COMPARE_VERSION_PRIORITY
1942 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1944 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1945 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1946 rs6000_generate_version_dispatcher_body
1948 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1949 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1950 rs6000_get_function_versions_dispatcher
1952 #undef TARGET_OPTION_FUNCTION_VERSIONS
1953 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1955 #undef TARGET_HARD_REGNO_NREGS
1956 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1957 #undef TARGET_HARD_REGNO_MODE_OK
1958 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1960 #undef TARGET_MODES_TIEABLE_P
1961 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1963 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1964 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1965 rs6000_hard_regno_call_part_clobbered
1967 #undef TARGET_SLOW_UNALIGNED_ACCESS
1968 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1970 #undef TARGET_CAN_CHANGE_MODE_CLASS
1971 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1973 #undef TARGET_CONSTANT_ALIGNMENT
1974 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1976 #undef TARGET_STARTING_FRAME_OFFSET
1977 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1980 /* Processor table. */
1981 struct rs6000_ptt
1983 const char *const name; /* Canonical processor name. */
1984 const enum processor_type processor; /* Processor type enum value. */
1985 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1988 static struct rs6000_ptt const processor_target_table[] =
1990 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1991 #include "rs6000-cpus.def"
1992 #undef RS6000_CPU
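/* Illustrative expansion (POWER8_MASKS is a hypothetical flags name): an
   entry in rs6000-cpus.def such as

     RS6000_CPU ("power8", PROCESSOR_POWER8, POWER8_MASKS)

   expands through the macro above into the initializer

     { "power8", PROCESSOR_POWER8, POWER8_MASKS },

   so the table is regenerated from the single list in the .def file.  */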
1995 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1996 name is invalid. */
1998 static int
1999 rs6000_cpu_name_lookup (const char *name)
2001 size_t i;
2003 if (name != NULL)
2005 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2006 if (! strcmp (name, processor_target_table[i].name))
2007 return (int)i;
2010 return -1;
2014 /* Return number of consecutive hard regs needed starting at reg REGNO
2015 to hold something of mode MODE.
2016 This is ordinarily the length in words of a value of mode MODE
2017 but can be less for certain modes in special long registers.
2019 POWER and PowerPC GPRs hold 32 bits worth;
2020 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2022 static int
2023 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2025 unsigned HOST_WIDE_INT reg_size;
2027 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2028 128-bit floating point that can go in vector registers, which has VSX
2029 memory addressing. */
2030 if (FP_REGNO_P (regno))
2031 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2032 ? UNITS_PER_VSX_WORD
2033 : UNITS_PER_FP_WORD);
2035 else if (ALTIVEC_REGNO_P (regno))
2036 reg_size = UNITS_PER_ALTIVEC_WORD;
2038 else
2039 reg_size = UNITS_PER_WORD;
2041 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
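/* Worked example (illustrative): assuming UNITS_PER_FP_WORD == 8 and
   UNITS_PER_ALTIVEC_WORD == 16, a 16-byte scalar held in FP registers
   needs (16 + 8 - 1) / 8 = 2 consecutive registers, while a 16-byte
   vector mode in an AltiVec register needs (16 + 16 - 1) / 16 = 1.
   The expression above is simply round-up division of the mode size by
   the register size.  */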
2044 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2045 MODE. */
2046 static int
2047 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2049 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2051 if (COMPLEX_MODE_P (mode))
2052 mode = GET_MODE_INNER (mode);
2054 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2055 register combinations; we use PTImode where we need to deal with quad
2056 word memory operations. Don't allow quad words in the argument or frame
2057 pointer registers, just registers 0..31. */
2058 if (mode == PTImode)
2059 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2060 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2061 && ((regno & 1) == 0));
2063 /* VSX registers that overlap the FPR registers are larger than for non-VSX
2064 implementations. Don't allow an item to be split between a FP register
2065 and an Altivec register. Allow TImode in all VSX registers if the user
2066 asked for it. */
2067 if (TARGET_VSX && VSX_REGNO_P (regno)
2068 && (VECTOR_MEM_VSX_P (mode)
2069 || FLOAT128_VECTOR_P (mode)
2070 || reg_addr[mode].scalar_in_vmx_p
2071 || mode == TImode
2072 || (TARGET_VADDUQM && mode == V1TImode)))
2074 if (FP_REGNO_P (regno))
2075 return FP_REGNO_P (last_regno);
2077 if (ALTIVEC_REGNO_P (regno))
2079 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2080 return 0;
2082 return ALTIVEC_REGNO_P (last_regno);
2086 /* The GPRs can hold any mode, but values bigger than one register
2087 cannot go past R31. */
2088 if (INT_REGNO_P (regno))
2089 return INT_REGNO_P (last_regno);
2091 /* The float registers (except for VSX vector modes) can only hold floating
2092 modes and DImode. */
2093 if (FP_REGNO_P (regno))
2095 if (FLOAT128_VECTOR_P (mode))
2096 return false;
2098 if (SCALAR_FLOAT_MODE_P (mode)
2099 && (mode != TDmode || (regno % 2) == 0)
2100 && FP_REGNO_P (last_regno))
2101 return 1;
2103 if (GET_MODE_CLASS (mode) == MODE_INT)
2105 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2106 return 1;
2108 if (TARGET_P8_VECTOR && (mode == SImode))
2109 return 1;
2111 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2112 return 1;
2115 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
2116 && PAIRED_VECTOR_MODE (mode))
2117 return 1;
2119 return 0;
2122 /* The CR register can only hold CC modes. */
2123 if (CR_REGNO_P (regno))
2124 return GET_MODE_CLASS (mode) == MODE_CC;
2126 if (CA_REGNO_P (regno))
2127 return mode == Pmode || mode == SImode;
2129 /* AltiVec modes can go only in AltiVec registers. */
2130 if (ALTIVEC_REGNO_P (regno))
2131 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2132 || mode == V1TImode);
2134 /* We cannot put non-VSX TImode or PTImode anywhere except general registers,
2135 and it must be able to fit within the register set. */
2137 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
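/* Example: the (mode != TDmode || (regno % 2) == 0) test above means
   TDmode (128-bit decimal float) may only start in an even-numbered FP
   register, so it can begin in the first FP register but not in the next
   (odd-numbered) one.  */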
2140 /* Implement TARGET_HARD_REGNO_NREGS. */
2142 static unsigned int
2143 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2145 return rs6000_hard_regno_nregs[mode][regno];
2148 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2150 static bool
2151 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2153 return rs6000_hard_regno_mode_ok_p[mode][regno];
2156 /* Implement TARGET_MODES_TIEABLE_P.
2158 PTImode cannot tie with other modes because PTImode is restricted to even
2159 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2160 57744).
2162 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2163 128-bit floating point on VSX systems ties with other vectors. */
2165 static bool
2166 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2168 if (mode1 == PTImode)
2169 return mode2 == PTImode;
2170 if (mode2 == PTImode)
2171 return false;
2173 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2174 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2175 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2176 return false;
2178 if (SCALAR_FLOAT_MODE_P (mode1))
2179 return SCALAR_FLOAT_MODE_P (mode2);
2180 if (SCALAR_FLOAT_MODE_P (mode2))
2181 return false;
2183 if (GET_MODE_CLASS (mode1) == MODE_CC)
2184 return GET_MODE_CLASS (mode2) == MODE_CC;
2185 if (GET_MODE_CLASS (mode2) == MODE_CC)
2186 return false;
2188 if (PAIRED_VECTOR_MODE (mode1))
2189 return PAIRED_VECTOR_MODE (mode2);
2190 if (PAIRED_VECTOR_MODE (mode2))
2191 return false;
2193 return true;
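/* Examples of the checks above: TImode never ties with PTImode, since
   PTImode is restricted to even/odd GPR pairs; SFmode and DFmode tie with
   each other as scalar float modes; and CC modes tie only with other CC
   modes.  */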
2196 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2198 static bool
2199 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2201 if (TARGET_32BIT
2202 && TARGET_POWERPC64
2203 && GET_MODE_SIZE (mode) > 4
2204 && INT_REGNO_P (regno))
2205 return true;
2207 if (TARGET_VSX
2208 && FP_REGNO_P (regno)
2209 && GET_MODE_SIZE (mode) > 8
2210 && !FLOAT128_2REG_P (mode))
2211 return true;
2213 return false;
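/* Example (illustrative): with -m32 -mpowerpc64, a DImode value occupies a
   single 64-bit GPR, but the 32-bit ABI only guarantees that the low 32
   bits of a callee-saved register survive a call, so any integer mode
   wider than 4 bytes is treated as partially call-clobbered by the first
   test above.  */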
2216 /* Print interesting facts about registers. */
2217 static void
2218 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2220 int r, m;
2222 for (r = first_regno; r <= last_regno; ++r)
2224 const char *comma = "";
2225 int len;
2227 if (first_regno == last_regno)
2228 fprintf (stderr, "%s:\t", reg_name);
2229 else
2230 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2232 len = 8;
2233 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2234 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2236 if (len > 70)
2238 fprintf (stderr, ",\n\t");
2239 len = 8;
2240 comma = "";
2243 if (rs6000_hard_regno_nregs[m][r] > 1)
2244 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2245 rs6000_hard_regno_nregs[m][r]);
2246 else
2247 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2249 comma = ", ";
2252 if (call_used_regs[r])
2254 if (len > 70)
2256 fprintf (stderr, ",\n\t");
2257 len = 8;
2258 comma = "";
2261 len += fprintf (stderr, "%s%s", comma, "call-used");
2262 comma = ", ";
2265 if (fixed_regs[r])
2267 if (len > 70)
2269 fprintf (stderr, ",\n\t");
2270 len = 8;
2271 comma = "";
2274 len += fprintf (stderr, "%s%s", comma, "fixed");
2275 comma = ", ";
2278 if (len > 70)
2280 fprintf (stderr, ",\n\t");
2281 comma = "";
2284 len += fprintf (stderr, "%sreg-class = %s", comma,
2285 reg_class_names[(int)rs6000_regno_regclass[r]]);
2286 comma = ", ";
2288 if (len > 70)
2290 fprintf (stderr, ",\n\t");
2291 comma = "";
2294 fprintf (stderr, "%sregno = %d\n", comma, r);
2298 static const char *
2299 rs6000_debug_vector_unit (enum rs6000_vector v)
2301 const char *ret;
2303 switch (v)
2305 case VECTOR_NONE: ret = "none"; break;
2306 case VECTOR_ALTIVEC: ret = "altivec"; break;
2307 case VECTOR_VSX: ret = "vsx"; break;
2308 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2309 case VECTOR_PAIRED: ret = "paired"; break;
2310 case VECTOR_OTHER: ret = "other"; break;
2311 default: ret = "unknown"; break;
2314 return ret;
2317 /* Inner function printing just the address mask for a particular reload
2318 register class. */
2319 DEBUG_FUNCTION char *
2320 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2322 static char ret[8];
2323 char *p = ret;
2325 if ((mask & RELOAD_REG_VALID) != 0)
2326 *p++ = 'v';
2327 else if (keep_spaces)
2328 *p++ = ' ';
2330 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2331 *p++ = 'm';
2332 else if (keep_spaces)
2333 *p++ = ' ';
2335 if ((mask & RELOAD_REG_INDEXED) != 0)
2336 *p++ = 'i';
2337 else if (keep_spaces)
2338 *p++ = ' ';
2340 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2341 *p++ = 'O';
2342 else if ((mask & RELOAD_REG_OFFSET) != 0)
2343 *p++ = 'o';
2344 else if (keep_spaces)
2345 *p++ = ' ';
2347 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2348 *p++ = '+';
2349 else if (keep_spaces)
2350 *p++ = ' ';
2352 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2353 *p++ = '+';
2354 else if (keep_spaces)
2355 *p++ = ' ';
2357 if ((mask & RELOAD_REG_AND_M16) != 0)
2358 *p++ = '&';
2359 else if (keep_spaces)
2360 *p++ = ' ';
2362 *p = '\0';
2364 return ret;
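/* Example output, derived from the flag characters above (columns are
   v m i o/O + + &): a mask with RELOAD_REG_VALID, RELOAD_REG_INDEXED and
   RELOAD_REG_OFFSET set prints as "v io   " when KEEP_SPACES is true and
   as "vio" when it is false.  Note the result points into a static
   buffer, so each call overwrites the previous string.  */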
2367 /* Print the address masks in a human readable fashion. */
2368 DEBUG_FUNCTION void
2369 rs6000_debug_print_mode (ssize_t m)
2371 ssize_t rc;
2372 int spaces = 0;
2373 bool fuse_extra_p;
2375 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2376 for (rc = 0; rc < N_RELOAD_REG; rc++)
2377 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2378 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2380 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2381 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2382 fprintf (stderr, " Reload=%c%c",
2383 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2384 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2385 else
2386 spaces += sizeof (" Reload=sl") - 1;
2388 if (reg_addr[m].scalar_in_vmx_p)
2390 fprintf (stderr, "%*s Upper=y", spaces, "");
2391 spaces = 0;
2393 else
2394 spaces += sizeof (" Upper=y") - 1;
2396 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2397 || reg_addr[m].fused_toc);
2398 if (!fuse_extra_p)
2400 for (rc = 0; rc < N_RELOAD_REG; rc++)
2402 if (rc != RELOAD_REG_ANY)
2404 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2406 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2407 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2408 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2410 fuse_extra_p = true;
2411 break;
2417 if (fuse_extra_p)
2419 fprintf (stderr, "%*s Fuse:", spaces, "");
2420 spaces = 0;
2422 for (rc = 0; rc < N_RELOAD_REG; rc++)
2424 if (rc != RELOAD_REG_ANY)
2426 char load, store;
2428 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2429 load = 'l';
2430 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2431 load = 'L';
2432 else
2433 load = '-';
2435 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2436 store = 's';
2437 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2438 store = 'S';
2439 else
2440 store = '-';
2442 if (load == '-' && store == '-')
2443 spaces += 5;
2444 else
2446 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2447 reload_reg_map[rc].name[0], load, store);
2448 spaces = 0;
2453 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2455 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2456 spaces = 0;
2458 else
2459 spaces += sizeof (" P8gpr") - 1;
2461 if (reg_addr[m].fused_toc)
2463 fprintf (stderr, "%*sToc", (spaces + 1), "");
2464 spaces = 0;
2466 else
2467 spaces += sizeof (" Toc") - 1;
2469 else
2470 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2472 if (rs6000_vector_unit[m] != VECTOR_NONE
2473 || rs6000_vector_mem[m] != VECTOR_NONE)
2475 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2476 spaces, "",
2477 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2478 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2481 fputs ("\n", stderr);
2484 #define DEBUG_FMT_ID "%-32s= "
2485 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2486 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2487 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
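/* Example: string concatenation makes DEBUG_FMT_D expand to "%-32s= %d\n",
   so fprintf (stderr, DEBUG_FMT_D, "tls_size", 13) prints the name padded
   to 32 columns, then "= 13" and a newline.  */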
2489 /* Print various interesting information with -mdebug=reg. */
2490 static void
2491 rs6000_debug_reg_global (void)
2493 static const char *const tf[2] = { "false", "true" };
2494 const char *nl = (const char *)0;
2495 int m;
2496 size_t m1, m2, v;
2497 char costly_num[20];
2498 char nop_num[20];
2499 char flags_buffer[40];
2500 const char *costly_str;
2501 const char *nop_str;
2502 const char *trace_str;
2503 const char *abi_str;
2504 const char *cmodel_str;
2505 struct cl_target_option cl_opts;
2507 /* Modes we want tieable information on. */
2508 static const machine_mode print_tieable_modes[] = {
2509 QImode,
2510 HImode,
2511 SImode,
2512 DImode,
2513 TImode,
2514 PTImode,
2515 SFmode,
2516 DFmode,
2517 TFmode,
2518 IFmode,
2519 KFmode,
2520 SDmode,
2521 DDmode,
2522 TDmode,
2523 V2SImode,
2524 V16QImode,
2525 V8HImode,
2526 V4SImode,
2527 V2DImode,
2528 V1TImode,
2529 V32QImode,
2530 V16HImode,
2531 V8SImode,
2532 V4DImode,
2533 V2TImode,
2534 V2SFmode,
2535 V4SFmode,
2536 V2DFmode,
2537 V8SFmode,
2538 V4DFmode,
2539 CCmode,
2540 CCUNSmode,
2541 CCEQmode,
2544 /* Virtual regs we are interested in. */
2545 static const struct {
2546 int regno; /* register number. */
2547 const char *name; /* register name. */
2548 } virtual_regs[] = {
2549 { STACK_POINTER_REGNUM, "stack pointer:" },
2550 { TOC_REGNUM, "toc: " },
2551 { STATIC_CHAIN_REGNUM, "static chain: " },
2552 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2553 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2554 { ARG_POINTER_REGNUM, "arg pointer: " },
2555 { FRAME_POINTER_REGNUM, "frame pointer:" },
2556 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2557 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2558 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2559 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2560 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2561 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2562 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2563 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2564 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2567 fputs ("\nHard register information:\n", stderr);
2568 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2569 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2570 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2571 LAST_ALTIVEC_REGNO,
2572 "vs");
2573 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2574 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2575 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2576 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2577 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2578 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2580 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2581 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2582 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2584 fprintf (stderr,
2585 "\n"
2586 "d reg_class = %s\n"
2587 "f reg_class = %s\n"
2588 "v reg_class = %s\n"
2589 "wa reg_class = %s\n"
2590 "wb reg_class = %s\n"
2591 "wd reg_class = %s\n"
2592 "we reg_class = %s\n"
2593 "wf reg_class = %s\n"
2594 "wg reg_class = %s\n"
2595 "wh reg_class = %s\n"
2596 "wi reg_class = %s\n"
2597 "wj reg_class = %s\n"
2598 "wk reg_class = %s\n"
2599 "wl reg_class = %s\n"
2600 "wm reg_class = %s\n"
2601 "wo reg_class = %s\n"
2602 "wp reg_class = %s\n"
2603 "wq reg_class = %s\n"
2604 "wr reg_class = %s\n"
2605 "ws reg_class = %s\n"
2606 "wt reg_class = %s\n"
2607 "wu reg_class = %s\n"
2608 "wv reg_class = %s\n"
2609 "ww reg_class = %s\n"
2610 "wx reg_class = %s\n"
2611 "wy reg_class = %s\n"
2612 "wz reg_class = %s\n"
2613 "wA reg_class = %s\n"
2614 "wH reg_class = %s\n"
2615 "wI reg_class = %s\n"
2616 "wJ reg_class = %s\n"
2617 "wK reg_class = %s\n"
2618 "\n",
2619 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2620 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2621 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2622 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2623 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2624 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2625 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2626 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2627 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2628 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2629 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2630 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2631 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2632 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2633 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2634 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2635 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2636 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2637 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2638 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2639 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2640 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2641 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2642 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2643 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2644 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2645 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2646 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2647 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2648 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2649 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2650 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2652 nl = "\n";
2653 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2654 rs6000_debug_print_mode (m);
2656 fputs ("\n", stderr);
2658 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2660 machine_mode mode1 = print_tieable_modes[m1];
2661 bool first_time = true;
2663 nl = (const char *)0;
2664 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2666 machine_mode mode2 = print_tieable_modes[m2];
2667 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2669 if (first_time)
2671 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2672 nl = "\n";
2673 first_time = false;
2676 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2680 if (!first_time)
2681 fputs ("\n", stderr);
2684 if (nl)
2685 fputs (nl, stderr);
2687 if (rs6000_recip_control)
2689 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2691 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2692 if (rs6000_recip_bits[m])
2694 fprintf (stderr,
2695 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2696 GET_MODE_NAME (m),
2697 (RS6000_RECIP_AUTO_RE_P (m)
2698 ? "auto"
2699 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2700 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2701 ? "auto"
2702 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2705 fputs ("\n", stderr);
2708 if (rs6000_cpu_index >= 0)
2710 const char *name = processor_target_table[rs6000_cpu_index].name;
2711 HOST_WIDE_INT flags
2712 = processor_target_table[rs6000_cpu_index].target_enable;
2714 sprintf (flags_buffer, "-mcpu=%s flags", name);
2715 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2717 else
2718 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2720 if (rs6000_tune_index >= 0)
2722 const char *name = processor_target_table[rs6000_tune_index].name;
2723 HOST_WIDE_INT flags
2724 = processor_target_table[rs6000_tune_index].target_enable;
2726 sprintf (flags_buffer, "-mtune=%s flags", name);
2727 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2729 else
2730 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2732 cl_target_option_save (&cl_opts, &global_options);
2733 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2734 rs6000_isa_flags);
2736 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2737 rs6000_isa_flags_explicit);
2739 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2740 rs6000_builtin_mask);
2742 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2744 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2745 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2747 switch (rs6000_sched_costly_dep)
2749 case max_dep_latency:
2750 costly_str = "max_dep_latency";
2751 break;
2753 case no_dep_costly:
2754 costly_str = "no_dep_costly";
2755 break;
2757 case all_deps_costly:
2758 costly_str = "all_deps_costly";
2759 break;
2761 case true_store_to_load_dep_costly:
2762 costly_str = "true_store_to_load_dep_costly";
2763 break;
2765 case store_to_load_dep_costly:
2766 costly_str = "store_to_load_dep_costly";
2767 break;
2769 default:
2770 costly_str = costly_num;
2771 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2772 break;
2775 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2777 switch (rs6000_sched_insert_nops)
2779 case sched_finish_regroup_exact:
2780 nop_str = "sched_finish_regroup_exact";
2781 break;
2783 case sched_finish_pad_groups:
2784 nop_str = "sched_finish_pad_groups";
2785 break;
2787 case sched_finish_none:
2788 nop_str = "sched_finish_none";
2789 break;
2791 default:
2792 nop_str = nop_num;
2793 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2794 break;
2797 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2799 switch (rs6000_sdata)
2801 default:
2802 case SDATA_NONE:
2803 break;
2805 case SDATA_DATA:
2806 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2807 break;
2809 case SDATA_SYSV:
2810 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2811 break;
2813 case SDATA_EABI:
2814 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2815 break;
2819 switch (rs6000_traceback)
2821 case traceback_default: trace_str = "default"; break;
2822 case traceback_none: trace_str = "none"; break;
2823 case traceback_part: trace_str = "part"; break;
2824 case traceback_full: trace_str = "full"; break;
2825 default: trace_str = "unknown"; break;
2828 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2830 switch (rs6000_current_cmodel)
2832 case CMODEL_SMALL: cmodel_str = "small"; break;
2833 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2834 case CMODEL_LARGE: cmodel_str = "large"; break;
2835 default: cmodel_str = "unknown"; break;
2838 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2840 switch (rs6000_current_abi)
2842 case ABI_NONE: abi_str = "none"; break;
2843 case ABI_AIX: abi_str = "aix"; break;
2844 case ABI_ELFv2: abi_str = "ELFv2"; break;
2845 case ABI_V4: abi_str = "V4"; break;
2846 case ABI_DARWIN: abi_str = "darwin"; break;
2847 default: abi_str = "unknown"; break;
2850 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2852 if (rs6000_altivec_abi)
2853 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2855 if (rs6000_darwin64_abi)
2856 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2858 fprintf (stderr, DEBUG_FMT_S, "single_float",
2859 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2861 fprintf (stderr, DEBUG_FMT_S, "double_float",
2862 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2864 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2865 (TARGET_SOFT_FLOAT ? "true" : "false"));
2867 if (TARGET_LINK_STACK)
2868 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2870 if (TARGET_P8_FUSION)
2872 char options[80];
2874 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2875 if (TARGET_TOC_FUSION)
2876 strcat (options, ", toc");
2878 if (TARGET_P8_FUSION_SIGN)
2879 strcat (options, ", sign");
2881 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2884 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2885 TARGET_SECURE_PLT ? "secure" : "bss");
2886 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2887 aix_struct_return ? "aix" : "sysv");
2888 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2889 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2890 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2891 tf[!!rs6000_align_branch_targets]);
2892 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2893 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2894 rs6000_long_double_type_size);
2895 if (rs6000_long_double_type_size == 128)
2897 fprintf (stderr, DEBUG_FMT_S, "long double type",
2898 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2899 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2900 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2902 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2903 (int)rs6000_sched_restricted_insns_priority);
2904 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2905 (int)END_BUILTINS);
2906 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2907 (int)RS6000_BUILTIN_COUNT);
2909 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2910 (int)TARGET_FLOAT128_ENABLE_TYPE);
2912 if (TARGET_VSX)
2913 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2914 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2916 if (TARGET_DIRECT_MOVE_128)
2917 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2918 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2922 /* Update the addr mask bits in reg_addr to help secondary reload and the
2923 legitimate address support figure out the appropriate addressing to
2924 use. */
2926 static void
2927 rs6000_setup_reg_addr_masks (void)
2929 ssize_t rc, reg, m, nregs;
2930 addr_mask_type any_addr_mask, addr_mask;
2932 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2934 machine_mode m2 = (machine_mode) m;
2935 bool complex_p = false;
2936 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2937 size_t msize;
2939 if (COMPLEX_MODE_P (m2))
2941 complex_p = true;
2942 m2 = GET_MODE_INNER (m2);
2945 msize = GET_MODE_SIZE (m2);
2947 /* SDmode is special in that we want to access it only via REG+REG
2948 addressing on power7 and above, since we want to use the LFIWZX and
2949 STFIWZX instructions to load it. */
2950 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2952 any_addr_mask = 0;
2953 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2955 addr_mask = 0;
2956 reg = reload_reg_map[rc].reg;
2958 /* Can mode values go in the GPR/FPR/Altivec registers? */
2959 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2961 bool small_int_vsx_p = (small_int_p
2962 && (rc == RELOAD_REG_FPR
2963 || rc == RELOAD_REG_VMX));
2965 nregs = rs6000_hard_regno_nregs[m][reg];
2966 addr_mask |= RELOAD_REG_VALID;
2968 /* Indicate if the mode takes more than 1 physical register. If
2969 it takes a single register, indicate it can do REG+REG
2970 addressing. Small integers in VSX registers can only do
2971 REG+REG addressing. */
2972 if (small_int_vsx_p)
2973 addr_mask |= RELOAD_REG_INDEXED;
2974 else if (nregs > 1 || m == BLKmode || complex_p)
2975 addr_mask |= RELOAD_REG_MULTIPLE;
2976 else
2977 addr_mask |= RELOAD_REG_INDEXED;
2979 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2980 addressing. If we allow scalars into Altivec registers,
2981 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2983 if (TARGET_UPDATE
2984 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2985 && msize <= 8
2986 && !VECTOR_MODE_P (m2)
2987 && !FLOAT128_VECTOR_P (m2)
2988 && !complex_p
2989 && !small_int_vsx_p)
2991 addr_mask |= RELOAD_REG_PRE_INCDEC;
2993 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2994 we don't allow PRE_MODIFY for some multi-register
2995 operations. */
2996 switch (m)
2998 default:
2999 addr_mask |= RELOAD_REG_PRE_MODIFY;
3000 break;
3002 case E_DImode:
3003 if (TARGET_POWERPC64)
3004 addr_mask |= RELOAD_REG_PRE_MODIFY;
3005 break;
3007 case E_DFmode:
3008 case E_DDmode:
3009 if (TARGET_DF_INSN)
3010 addr_mask |= RELOAD_REG_PRE_MODIFY;
3011 break;
3016 /* GPR and FPR registers can do REG+OFFSET addressing, except
3017 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
3018 for 64-bit scalars and 32-bit SFmode to altivec registers. */
3019 if ((addr_mask != 0) && !indexed_only_p
3020 && msize <= 8
3021 && (rc == RELOAD_REG_GPR
3022 || ((msize == 8 || m2 == SFmode)
3023 && (rc == RELOAD_REG_FPR
3024 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
3025 addr_mask |= RELOAD_REG_OFFSET;
3027 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
3028 instructions are enabled. The offset for 128-bit VSX registers is
3029 only 12 bits. While GPRs can handle the full offset range, VSX
3030 registers can only handle the restricted range. */
3031 else if ((addr_mask != 0) && !indexed_only_p
3032 && msize == 16 && TARGET_P9_VECTOR
3033 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
3034 || (m2 == TImode && TARGET_VSX)))
3036 addr_mask |= RELOAD_REG_OFFSET;
3037 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
3038 addr_mask |= RELOAD_REG_QUAD_OFFSET;
3041 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
3042 addressing on 128-bit types. */
3043 if (rc == RELOAD_REG_VMX && msize == 16
3044 && (addr_mask & RELOAD_REG_VALID) != 0)
3045 addr_mask |= RELOAD_REG_AND_M16;
3047 reg_addr[m].addr_mask[rc] = addr_mask;
3048 any_addr_mask |= addr_mask;
3051 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
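/* Usage sketch (illustrative): later code tests individual bits of these
   precomputed masks rather than recomputing legitimacy, e.g.

     if ((reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_OFFSET) != 0)
       ...  mode supports REG+OFFSET addressing in FP registers  ...

   The RELOAD_REG_ANY entry holds the union of the masks over all reload
   register classes.  */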
3056 /* Initialize the various global tables that are based on register size. */
3057 static void
3058 rs6000_init_hard_regno_mode_ok (bool global_init_p)
3060 ssize_t r, m, c;
3061 int align64;
3062 int align32;
3064 /* Precalculate REGNO_REG_CLASS. */
3065 rs6000_regno_regclass[0] = GENERAL_REGS;
3066 for (r = 1; r < 32; ++r)
3067 rs6000_regno_regclass[r] = BASE_REGS;
3069 for (r = 32; r < 64; ++r)
3070 rs6000_regno_regclass[r] = FLOAT_REGS;
3072 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
3073 rs6000_regno_regclass[r] = NO_REGS;
3075 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3076 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3078 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3079 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3080 rs6000_regno_regclass[r] = CR_REGS;
3082 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3083 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3084 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3085 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3086 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3087 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3088 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3089 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3090 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3091 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3093 /* Precalculate register class to simpler reload register class. We don't
3094 need all of the register classes that are combinations of different
3095 classes, just the simple ones that have constraint letters. */
3096 for (c = 0; c < N_REG_CLASSES; c++)
3097 reg_class_to_reg_type[c] = NO_REG_TYPE;
3099 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3100 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3101 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3102 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3103 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3104 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3105 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3106 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3107 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3108 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3110 if (TARGET_VSX)
3112 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3113 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3115 else
3117 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3118 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3121 /* Precalculate the valid memory formats as well as the vector information;
3122 this must be set up before the rs6000_hard_regno_nregs_internal calls
3123 below. */
3124 gcc_assert ((int)VECTOR_NONE == 0);
3125 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3126 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
3128 gcc_assert ((int)CODE_FOR_nothing == 0);
3129 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3131 gcc_assert ((int)NO_REGS == 0);
3132 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3134 /* The VSX hardware allows native alignment for vectors, but we control whether
3135 the compiler believes it can use native alignment or must still use 128-bit alignment. */
3136 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3138 align64 = 64;
3139 align32 = 32;
3141 else
3143 align64 = 128;
3144 align32 = 128;
3147 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3148 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3149 if (TARGET_FLOAT128_TYPE)
3151 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3152 rs6000_vector_align[KFmode] = 128;
3154 if (FLOAT128_IEEE_P (TFmode))
3156 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3157 rs6000_vector_align[TFmode] = 128;
3161 /* V2DF mode, VSX only. */
3162 if (TARGET_VSX)
3164 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3165 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3166 rs6000_vector_align[V2DFmode] = align64;
3169 /* V4SF mode, either VSX or Altivec. */
3170 if (TARGET_VSX)
3172 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3173 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3174 rs6000_vector_align[V4SFmode] = align32;
3176 else if (TARGET_ALTIVEC)
3178 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3179 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3180 rs6000_vector_align[V4SFmode] = align32;
3183 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3184 and stores. */
3185 if (TARGET_ALTIVEC)
3187 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3188 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3189 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3190 rs6000_vector_align[V4SImode] = align32;
3191 rs6000_vector_align[V8HImode] = align32;
3192 rs6000_vector_align[V16QImode] = align32;
3194 if (TARGET_VSX)
3196 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3197 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3198 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3200 else
3202 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3203 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3204 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3208 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3209 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3210 if (TARGET_VSX)
3212 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3213 rs6000_vector_unit[V2DImode]
3214 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3215 rs6000_vector_align[V2DImode] = align64;
3217 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3218 rs6000_vector_unit[V1TImode]
3219 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3220 rs6000_vector_align[V1TImode] = 128;
3223 /* DFmode, see if we want to use the VSX unit. Memory is handled
3224 differently, so don't set rs6000_vector_mem. */
3225 if (TARGET_VSX)
3227 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3228 rs6000_vector_align[DFmode] = 64;
3231 /* SFmode, see if we want to use the VSX unit. */
3232 if (TARGET_P8_VECTOR)
3234 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3235 rs6000_vector_align[SFmode] = 32;
3238 /* Allow TImode in VSX registers and set the VSX memory macros. */
3239 if (TARGET_VSX)
3241 rs6000_vector_mem[TImode] = VECTOR_VSX;
3242 rs6000_vector_align[TImode] = align64;
3245 /* TODO add paired floating point vector support. */
3247 /* Register class constraints for the constraints that depend on compile
3248 switches. When the VSX code was added, different constraints were added
3249 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3250 of the VSX registers are used. The register classes for scalar floating
3251 point types are set, based on whether we allow that type into the upper
3252 (Altivec) registers. GCC has register classes to target the Altivec
3253 registers for load/store operations, to select using a VSX memory
3254 operation instead of the traditional floating point operation. The
3255 constraints are:
3257 d - Register class to use with traditional DFmode instructions.
3258 f - Register class to use with traditional SFmode instructions.
3259 v - Altivec register.
3260 wa - Any VSX register.
3261 wc - Reserved to represent individual CR bits (used in LLVM).
3262 wd - Preferred register class for V2DFmode.
3263 wf - Preferred register class for V4SFmode.
3264 wg - Float register for power6x move insns.
3265 wh - FP register for direct move instructions.
3266 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3267 wj - FP or VSX register to hold 64-bit integers for direct moves.
3268 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3269 wl - Float register if we can do 32-bit signed int loads.
3270 wm - VSX register for ISA 2.07 direct move operations.
3271 wn - always NO_REGS.
3272 wr - GPR if 64-bit mode is permitted.
3273 ws - Register class to do ISA 2.06 DF operations.
3274 wt - VSX register for TImode in VSX registers.
3275 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3276 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3277 ww - Register class to do SF conversions in with VSX operations.
3278 wx - Float register if we can do 32-bit int stores.
3279 wy - Register class to do ISA 2.07 SF operations.
3280 wz - Float register if we can do 32-bit unsigned int loads.
3281 wH - Altivec register if SImode is allowed in VSX registers.
3282 wI - VSX register if SImode is allowed in VSX registers.
3283 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3284 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
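/* Hypothetical machine-description fragment showing how these constraints
   are consumed: an insn alternative such as

     (match_operand:DF 0 "vsx_register_operand" "=ws")

   only matches once RS6000_CONSTRAINT_ws has been set to a real register
   class below; while it remains NO_REGS the alternative is effectively
   disabled.  */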
3286 if (TARGET_HARD_FLOAT)
3287 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3289 if (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
3290 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3292 if (TARGET_VSX)
3294 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3295 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3296 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3297 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3298 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3299 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3300 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3303 /* Add conditional constraints based on various options, to allow us to
3304 collapse multiple insn patterns. */
3305 if (TARGET_ALTIVEC)
3306 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3308 if (TARGET_MFPGPR) /* DFmode */
3309 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3311 if (TARGET_LFIWAX)
3312 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3314 if (TARGET_DIRECT_MOVE)
3316 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3317 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3318 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3319 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3320 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3321 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3324 if (TARGET_POWERPC64)
3326 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3327 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3330 if (TARGET_P8_VECTOR) /* SFmode */
3332 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3333 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3334 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3336 else if (TARGET_VSX)
3337 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3339 if (TARGET_STFIWX)
3340 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3342 if (TARGET_LFIWZX)
3343 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3345 if (TARGET_FLOAT128_TYPE)
3347 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3348 if (FLOAT128_IEEE_P (TFmode))
3349 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3352 if (TARGET_P9_VECTOR)
3354 /* Support for new D-form instructions. */
3355 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3357 /* Support for ISA 3.0 (power9) vectors. */
3358 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3361 /* Support for new direct moves (ISA 3.0 + 64bit). */
3362 if (TARGET_DIRECT_MOVE_128)
3363 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3365 /* Support small integers in VSX registers. */
3366 if (TARGET_P8_VECTOR)
3368 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3369 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3370 if (TARGET_P9_VECTOR)
3372 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3373 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3377 /* Set up the reload helper and direct move functions. */
3378 if (TARGET_VSX || TARGET_ALTIVEC)
3380 if (TARGET_64BIT)
3382 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3383 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3384 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3385 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3386 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3387 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3388 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3389 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3390 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3391 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3392 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3393 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3394 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3395 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3396 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3397 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3398 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3399 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3400 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3401 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3403 if (FLOAT128_VECTOR_P (KFmode))
3405 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3406 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3409 if (FLOAT128_VECTOR_P (TFmode))
3411 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3412 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3415 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3416 available. */
3417 if (TARGET_NO_SDMODE_STACK)
3419 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3420 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3423 if (TARGET_VSX)
3425 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3426 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3429 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3431 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3432 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3433 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3434 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3435 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3436 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3437 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3438 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3439 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3441 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3442 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3443 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3444 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3445 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3446 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3447 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3448 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3449 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3451 if (FLOAT128_VECTOR_P (KFmode))
3453 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3454 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3457 if (FLOAT128_VECTOR_P (TFmode))
3459 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3460 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3464 else
3466 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3467 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3468 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3469 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3470 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3471 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3472 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3473 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3474 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3475 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3476 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3477 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3478 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3479 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3480 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3481 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3482 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3483 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3484 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3485 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3487 if (FLOAT128_VECTOR_P (KFmode))
3489 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3490 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3493 if (FLOAT128_IEEE_P (TFmode))
3495 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3496 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3499 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3500 available. */
3501 if (TARGET_NO_SDMODE_STACK)
3503 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3504 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3507 if (TARGET_VSX)
3509 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3510 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3513 if (TARGET_DIRECT_MOVE)
3515 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3516 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3517 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3521 reg_addr[DFmode].scalar_in_vmx_p = true;
3522 reg_addr[DImode].scalar_in_vmx_p = true;
3524 if (TARGET_P8_VECTOR)
3526 reg_addr[SFmode].scalar_in_vmx_p = true;
3527 reg_addr[SImode].scalar_in_vmx_p = true;
3529 if (TARGET_P9_VECTOR)
3531 reg_addr[HImode].scalar_in_vmx_p = true;
3532 reg_addr[QImode].scalar_in_vmx_p = true;
3537 /* Set up the fusion operations. */
3538 if (TARGET_P8_FUSION)
3540 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3541 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3542 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3543 if (TARGET_64BIT)
3544 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
3547 if (TARGET_P9_FUSION)
3549 struct fuse_insns {
3550 enum machine_mode mode; /* mode of the fused type. */
3551 enum machine_mode pmode; /* pointer mode. */
3552 enum rs6000_reload_reg_type rtype; /* register type. */
3553 enum insn_code load; /* load insn. */
3554 enum insn_code store; /* store insn. */
3557 static const struct fuse_insns addis_insns[] = {
3558 { E_SFmode, E_DImode, RELOAD_REG_FPR,
3559 CODE_FOR_fusion_vsx_di_sf_load,
3560 CODE_FOR_fusion_vsx_di_sf_store },
3562 { E_SFmode, E_SImode, RELOAD_REG_FPR,
3563 CODE_FOR_fusion_vsx_si_sf_load,
3564 CODE_FOR_fusion_vsx_si_sf_store },
3566 { E_DFmode, E_DImode, RELOAD_REG_FPR,
3567 CODE_FOR_fusion_vsx_di_df_load,
3568 CODE_FOR_fusion_vsx_di_df_store },
3570 { E_DFmode, E_SImode, RELOAD_REG_FPR,
3571 CODE_FOR_fusion_vsx_si_df_load,
3572 CODE_FOR_fusion_vsx_si_df_store },
3574 { E_DImode, E_DImode, RELOAD_REG_FPR,
3575 CODE_FOR_fusion_vsx_di_di_load,
3576 CODE_FOR_fusion_vsx_di_di_store },
3578 { E_DImode, E_SImode, RELOAD_REG_FPR,
3579 CODE_FOR_fusion_vsx_si_di_load,
3580 CODE_FOR_fusion_vsx_si_di_store },
3582 { E_QImode, E_DImode, RELOAD_REG_GPR,
3583 CODE_FOR_fusion_gpr_di_qi_load,
3584 CODE_FOR_fusion_gpr_di_qi_store },
3586 { E_QImode, E_SImode, RELOAD_REG_GPR,
3587 CODE_FOR_fusion_gpr_si_qi_load,
3588 CODE_FOR_fusion_gpr_si_qi_store },
3590 { E_HImode, E_DImode, RELOAD_REG_GPR,
3591 CODE_FOR_fusion_gpr_di_hi_load,
3592 CODE_FOR_fusion_gpr_di_hi_store },
3594 { E_HImode, E_SImode, RELOAD_REG_GPR,
3595 CODE_FOR_fusion_gpr_si_hi_load,
3596 CODE_FOR_fusion_gpr_si_hi_store },
3598 { E_SImode, E_DImode, RELOAD_REG_GPR,
3599 CODE_FOR_fusion_gpr_di_si_load,
3600 CODE_FOR_fusion_gpr_di_si_store },
3602 { E_SImode, E_SImode, RELOAD_REG_GPR,
3603 CODE_FOR_fusion_gpr_si_si_load,
3604 CODE_FOR_fusion_gpr_si_si_store },
3606 { E_SFmode, E_DImode, RELOAD_REG_GPR,
3607 CODE_FOR_fusion_gpr_di_sf_load,
3608 CODE_FOR_fusion_gpr_di_sf_store },
3610 { E_SFmode, E_SImode, RELOAD_REG_GPR,
3611 CODE_FOR_fusion_gpr_si_sf_load,
3612 CODE_FOR_fusion_gpr_si_sf_store },
3614 { E_DImode, E_DImode, RELOAD_REG_GPR,
3615 CODE_FOR_fusion_gpr_di_di_load,
3616 CODE_FOR_fusion_gpr_di_di_store },
3618 { E_DFmode, E_DImode, RELOAD_REG_GPR,
3619 CODE_FOR_fusion_gpr_di_df_load,
3620 CODE_FOR_fusion_gpr_di_df_store },
3623 machine_mode cur_pmode = Pmode;
3624 size_t i;
3626 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3628 machine_mode xmode = addis_insns[i].mode;
3629 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3631 if (addis_insns[i].pmode != cur_pmode)
3632 continue;
3634 if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
3635 continue;
3637 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3638 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3640 if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
3642 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3643 = addis_insns[i].load;
3644 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3645 = addis_insns[i].store;
3650 /* Note for which types we support fusing a TOC setup with a memory insn.
3651 We only do fused TOCs for medium/large code models. */
3652 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3653 && (TARGET_CMODEL != CMODEL_SMALL))
3655 reg_addr[QImode].fused_toc = true;
3656 reg_addr[HImode].fused_toc = true;
3657 reg_addr[SImode].fused_toc = true;
3658 reg_addr[DImode].fused_toc = true;
3659 if (TARGET_HARD_FLOAT)
3661 if (TARGET_SINGLE_FLOAT)
3662 reg_addr[SFmode].fused_toc = true;
3663 if (TARGET_DOUBLE_FLOAT)
3664 reg_addr[DFmode].fused_toc = true;
3668 /* Precalculate HARD_REGNO_NREGS. */
3669 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3670 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3671 rs6000_hard_regno_nregs[m][r]
3672 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3674 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3675 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3676 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3677 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3678 rs6000_hard_regno_mode_ok_p[m][r] = true;
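/* For example, on a 32-bit target DFmode occupies
   rs6000_hard_regno_nregs[DFmode][0] == 2 consecutive GPRs (8 bytes in
   4-byte registers) but only a single 8-byte FPR; caching the answers
   above turns the later per-register queries into simple table lookups.  */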
3680 /* Precalculate CLASS_MAX_NREGS sizes. */
3681 for (c = 0; c < LIM_REG_CLASSES; ++c)
3683 int reg_size;
3685 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3686 reg_size = UNITS_PER_VSX_WORD;
3688 else if (c == ALTIVEC_REGS)
3689 reg_size = UNITS_PER_ALTIVEC_WORD;
3691 else if (c == FLOAT_REGS)
3692 reg_size = UNITS_PER_FP_WORD;
3694 else
3695 reg_size = UNITS_PER_WORD;
3697 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3699 machine_mode m2 = (machine_mode)m;
3700 int reg_size2 = reg_size;
3702 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3703 in VSX. */
3704 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3705 reg_size2 = UNITS_PER_FP_WORD;
3707 rs6000_class_max_nregs[m][c]
3708 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
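/* A worked instance of the computation above: IBM extended TFmode is 16
   bytes and FLOAT128_2REG_P, so in a VSX class it uses reg_size2 =
   UNITS_PER_FP_WORD = 8 and needs (16 + 8 - 1) / 8 = 2 registers, while a
   16-byte vector mode in VSX_REGS gets (16 + 16 - 1) / 16 = 1.  */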
3712 /* Calculate for which modes to automatically generate code that uses the
3713 reciprocal divide and square root instructions. In the future, possibly
3714 automatically generate the instructions even if the user did not specify
3715 -mrecip. The older machines' double-precision reciprocal sqrt estimate is
3716 not accurate enough. */
3717 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3718 if (TARGET_FRES)
3719 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3720 if (TARGET_FRE)
3721 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3722 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3723 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3724 if (VECTOR_UNIT_VSX_P (V2DFmode))
3725 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3727 if (TARGET_FRSQRTES)
3728 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3729 if (TARGET_FRSQRTE)
3730 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3731 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3732 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3733 if (VECTOR_UNIT_VSX_P (V2DFmode))
3734 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3736 if (rs6000_recip_control)
3738 if (!flag_finite_math_only)
3739 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only",
3740 "-ffast-math");
3741 if (flag_trapping_math)
3742 warning (0, "%qs requires %qs or %qs", "-mrecip",
3743 "-fno-trapping-math", "-ffast-math");
3744 if (!flag_reciprocal_math)
3745 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3746 "-ffast-math");
3747 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3749 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3750 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3751 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3753 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3754 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3755 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3757 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3758 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3759 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3761 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3762 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3763 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3765 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3766 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3767 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3769 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3770 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3771 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3773 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3774 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3775 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3777 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3778 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3779 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
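/* The estimate instructions flagged above are refined with Newton-Raphson
   steps when code is generated; as a sketch, a reciprocal estimate e of
   1/d is improved by e' = e * (2 - d * e), and an rsqrt estimate e of
   1/sqrt(d) by e' = e * (1.5 - 0.5 * d * e * e), each step roughly
   doubling the number of correct bits.  */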
3783 /* Update the addr mask bits in reg_addr to help secondary reload and the
3784 legitimate address support figure out the appropriate addressing to
3785 use. */
3786 rs6000_setup_reg_addr_masks ();
3788 if (global_init_p || TARGET_DEBUG_TARGET)
3790 if (TARGET_DEBUG_REG)
3791 rs6000_debug_reg_global ();
3793 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3794 fprintf (stderr,
3795 "SImode variable mult cost = %d\n"
3796 "SImode constant mult cost = %d\n"
3797 "SImode short constant mult cost = %d\n"
3798 "DImode multipliciation cost = %d\n"
3799 "SImode division cost = %d\n"
3800 "DImode division cost = %d\n"
3801 "Simple fp operation cost = %d\n"
3802 "DFmode multiplication cost = %d\n"
3803 "SFmode division cost = %d\n"
3804 "DFmode division cost = %d\n"
3805 "cache line size = %d\n"
3806 "l1 cache size = %d\n"
3807 "l2 cache size = %d\n"
3808 "simultaneous prefetches = %d\n"
3809 "\n",
3810 rs6000_cost->mulsi,
3811 rs6000_cost->mulsi_const,
3812 rs6000_cost->mulsi_const9,
3813 rs6000_cost->muldi,
3814 rs6000_cost->divsi,
3815 rs6000_cost->divdi,
3816 rs6000_cost->fp,
3817 rs6000_cost->dmul,
3818 rs6000_cost->sdiv,
3819 rs6000_cost->ddiv,
3820 rs6000_cost->cache_line_size,
3821 rs6000_cost->l1_cache_size,
3822 rs6000_cost->l2_cache_size,
3823 rs6000_cost->simultaneous_prefetches);
3827 #if TARGET_MACHO
3828 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3830 static void
3831 darwin_rs6000_override_options (void)
3833 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3834 off. */
3835 rs6000_altivec_abi = 1;
3836 TARGET_ALTIVEC_VRSAVE = 1;
3837 rs6000_current_abi = ABI_DARWIN;
3839 if (DEFAULT_ABI == ABI_DARWIN
3840 && TARGET_64BIT)
3841 darwin_one_byte_bool = 1;
3843 if (TARGET_64BIT && ! TARGET_POWERPC64)
3845 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3846 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3848 if (flag_mkernel)
3850 rs6000_default_long_calls = 1;
3851 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3854 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3855 Altivec. */
3856 if (!flag_mkernel && !flag_apple_kext
3857 && TARGET_64BIT
3858 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3859 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3861 /* Unless the user (not the configurer) has explicitly overridden
3862 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3863 G4 unless targeting the kernel. */
3864 if (!flag_mkernel
3865 && !flag_apple_kext
3866 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3867 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3868 && ! global_options_set.x_rs6000_cpu_index)
3870 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3873 #endif
3875 /* If not otherwise specified by a target, make 'long double' equivalent to
3876 'double'. */
3878 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3879 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3880 #endif
3882 /* Return the builtin mask for the various target options that could affect
3883 which builtins are usable. In the past we used target_flags, but we've run
3884 out of bits, and some options like PAIRED are no longer in target_flags. */
3886 HOST_WIDE_INT
3887 rs6000_builtin_mask_calculate (void)
3889 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3890 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3891 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3892 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3893 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3894 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3895 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3896 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3897 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3898 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3899 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3900 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3901 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3902 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3903 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3904 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3905 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3906 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3907 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3908 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3909 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3910 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
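/* A minimal sketch of how the mask is consumed (assuming a builtin whose
   table entry requires RS6000_BTM_ALTIVEC):

     HOST_WIDE_INT mask = rs6000_builtin_mask_calculate ();
     if ((mask & RS6000_BTM_ALTIVEC) == 0)
       ...  // treat the AltiVec builtins as unavailable here

   Builtin expansion compares the mask captured when options were set
   against the bits each builtin declares that it needs.  */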
3913 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3914 to clobber the XER[CA] bit because clobbering that bit without telling
3915 the compiler worked just fine with versions of GCC before GCC 5, and
3916 breaking a lot of older code in ways that are hard to track down is
3917 not such a great idea. */
3919 static rtx_insn *
3920 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3921 vec<const char *> &/*constraints*/,
3922 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3924 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3925 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3926 return NULL;
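/* For instance, user asm that produces and consumes the carry bit, such
   as this assumed example:

     unsigned long x, y;
     __asm__ ("addic %0,%1,-1\n\taddze %0,%0" : "=r" (x) : "r" (y));

   relies on XER[CA] between the two instructions; the implicit clobber
   added above tells the compiler never to keep its own value live in CA
   across such (pre-GCC 5 style) statements.  */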
3929 /* Override command line options.
3931 Combine build-specific configuration information with options
3932 specified on the command line to set various state variables which
3933 influence code generation, optimization, and expansion of built-in
3934 functions. Ensure that command-line configuration preferences are
3935 compatible with each other and with the build configuration; issue
3936 warnings while adjusting configuration or error messages while
3937 rejecting configuration.
3939 Upon entry to this function:
3941 This function is called once at the beginning of
3942 compilation, and then again at the start and end of compiling
3943 each section of code that has a different configuration, as
3944 indicated, for example, by adding the
3946 __attribute__((__target__("cpu=power9")))
3948 qualifier to a function definition or, for example, by bracketing
3949 code between
3951 #pragma GCC target("altivec")
3955 #pragma GCC reset_options
3957 directives. Parameter global_init_p is true for the initial
3958 invocation, which initializes global variables, and false for all
3959 subsequent invocations.
3962 Various global state information is assumed to be valid. This
3963 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3964 default CPU specified at build configure time, TARGET_DEFAULT,
3965 representing the default set of option flags for the default
3966 target, and global_options_set.x_rs6000_isa_flags, representing
3967 which options were requested on the command line.
3969 Upon return from this function:
3971 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3972 was set by name on the command line. Additionally, if certain
3973 attributes are automatically enabled or disabled by this function
3974 in order to assure compatibility between options and
3975 configuration, the flags associated with those attributes are
3976 also set. By setting these "explicit bits", we avoid the risk
3977 that other code might accidentally overwrite these particular
3978 attributes with "default values".
3980 The various bits of rs6000_isa_flags are set to indicate the
3981 target options that have been selected for the most current
3982 compilation efforts. This has the effect of also turning on the
3983 associated TARGET_XXX values since these are macros which are
3984 generally defined to test the corresponding bit of the
3985 rs6000_isa_flags variable.
3987 The variable rs6000_builtin_mask is set to represent the target
3988 options for the most current compilation efforts, consistent with
3989 the current contents of rs6000_isa_flags. This variable controls
3990 expansion of built-in functions.
3992 Various other global variables and fields of global structures
3993 (over 50 in all) are initialized to reflect the desired options
3994 for the most current compilation efforts. */
3996 static bool
3997 rs6000_option_override_internal (bool global_init_p)
3999 bool ret = true;
4001 HOST_WIDE_INT set_masks;
4002 HOST_WIDE_INT ignore_masks;
4003 int cpu_index = -1;
4004 int tune_index;
4005 struct cl_target_option *main_target_opt
4006 = ((global_init_p || target_option_default_node == NULL)
4007 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
4009 /* Print defaults. */
4010 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
4011 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
4013 /* Remember the explicit arguments. */
4014 if (global_init_p)
4015 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
4017 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
4018 library functions, so warn about it. The flag may be useful for
4019 performance studies from time to time though, so don't disable it
4020 entirely. */
4021 if (global_options_set.x_rs6000_alignment_flags
4022 && rs6000_alignment_flags == MASK_ALIGN_POWER
4023 && DEFAULT_ABI == ABI_DARWIN
4024 && TARGET_64BIT)
4025 warning (0, "%qs is not supported for 64-bit Darwin;"
4026 " it is incompatible with the installed C and C++ libraries",
4027 "-malign-power");
4029 /* Numerous experiments show that IRA-based loop pressure
4030 calculation works better for RTL loop invariant motion on targets
4031 with enough (>= 32) registers. It is an expensive optimization,
4032 so it is only on for peak performance. */
4033 if (optimize >= 3 && global_init_p
4034 && !global_options_set.x_flag_ira_loop_pressure)
4035 flag_ira_loop_pressure = 1;
4037 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
4038 for tracebacks to be complete, but not if an -fasynchronous-unwind-tables
4039 option was already specified. */
4040 if (flag_sanitize & SANITIZE_USER_ADDRESS
4041 && !global_options_set.x_flag_asynchronous_unwind_tables)
4042 flag_asynchronous_unwind_tables = 1;
4044 /* Set the pointer size. */
4045 if (TARGET_64BIT)
4047 rs6000_pmode = DImode;
4048 rs6000_pointer_size = 64;
4050 else
4052 rs6000_pmode = SImode;
4053 rs6000_pointer_size = 32;
4056 /* Some OSs don't support saving the high part of 64-bit registers on context
4057 switch. Other OSs don't support saving Altivec registers. On those OSs,
4058 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
4059 if the user wants either, the user must explicitly specify them and we
4060 won't interfere with the user's specification. */
4062 set_masks = POWERPC_MASKS;
4063 #ifdef OS_MISSING_POWERPC64
4064 if (OS_MISSING_POWERPC64)
4065 set_masks &= ~OPTION_MASK_POWERPC64;
4066 #endif
4067 #ifdef OS_MISSING_ALTIVEC
4068 if (OS_MISSING_ALTIVEC)
4069 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
4070 | OTHER_VSX_VECTOR_MASKS);
4071 #endif
4073 /* Don't let the processor default override options given explicitly. */
4074 set_masks &= ~rs6000_isa_flags_explicit;
4076 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
4077 the cpu in a target attribute or pragma, but did not specify a tuning
4078 option, use the cpu for the tuning option rather than the option specified
4079 with -mtune on the command line. Process a '--with-cpu' configuration
4080 request as an implicit --cpu. */
4081 if (rs6000_cpu_index >= 0)
4082 cpu_index = rs6000_cpu_index;
4083 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
4084 cpu_index = main_target_opt->x_rs6000_cpu_index;
4085 else if (OPTION_TARGET_CPU_DEFAULT)
4086 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
4088 if (cpu_index >= 0)
4090 const char *unavailable_cpu = NULL;
4091 switch (processor_target_table[cpu_index].processor)
4093 #ifndef HAVE_AS_POWER9
4094 case PROCESSOR_POWER9:
4095 unavailable_cpu = "power9";
4096 break;
4097 #endif
4098 #ifndef HAVE_AS_POWER8
4099 case PROCESSOR_POWER8:
4100 unavailable_cpu = "power8";
4101 break;
4102 #endif
4103 #ifndef HAVE_AS_POPCNTD
4104 case PROCESSOR_POWER7:
4105 unavailable_cpu = "power7";
4106 break;
4107 #endif
4108 #ifndef HAVE_AS_DFP
4109 case PROCESSOR_POWER6:
4110 unavailable_cpu = "power6";
4111 break;
4112 #endif
4113 #ifndef HAVE_AS_POPCNTB
4114 case PROCESSOR_POWER5:
4115 unavailable_cpu = "power5";
4116 break;
4117 #endif
4118 default:
4119 break;
4121 if (unavailable_cpu)
4123 cpu_index = -1;
4124 warning (0, "will not generate %qs instructions because "
4125 "assembler lacks %qs support", unavailable_cpu,
4126 unavailable_cpu);
4130 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
4131 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4132 with those from the cpu, except for options that were explicitly set. If
4133 we don't have a cpu, do not override the target bits set in
4134 TARGET_DEFAULT. */
4135 if (cpu_index >= 0)
4137 rs6000_cpu_index = cpu_index;
4138 rs6000_isa_flags &= ~set_masks;
4139 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4140 & set_masks);
4142 else
4144 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4145 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4146 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Since we switched
4147 to using rs6000_isa_flags, we need to do the initialization here.
4149 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4150 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4151 HOST_WIDE_INT flags;
4152 if (TARGET_DEFAULT)
4153 flags = TARGET_DEFAULT;
4154 else
4156 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4157 const char *default_cpu = (!TARGET_POWERPC64
4158 ? "powerpc"
4159 : (BYTES_BIG_ENDIAN
4160 ? "powerpc64"
4161 : "powerpc64le"));
4162 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
4163 flags = processor_target_table[default_cpu_index].target_enable;
4165 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
4168 if (rs6000_tune_index >= 0)
4169 tune_index = rs6000_tune_index;
4170 else if (cpu_index >= 0)
4171 rs6000_tune_index = tune_index = cpu_index;
4172 else
4174 size_t i;
4175 enum processor_type tune_proc
4176 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4178 tune_index = -1;
4179 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4180 if (processor_target_table[i].processor == tune_proc)
4182 tune_index = i;
4183 break;
4187 gcc_assert (tune_index >= 0);
4188 rs6000_cpu = processor_target_table[tune_index].processor;
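/* Illustrating the precedence just applied: "-mcpu=power8" alone makes
   power8 both the ISA and the tuning target; "-mcpu=power8 -mtune=power9"
   keeps the power8 ISA bits but schedules for power9; with neither
   option, PROCESSOR_DEFAULT or PROCESSOR_DEFAULT64 supplies the tuning.  */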
4190 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4191 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4192 || rs6000_cpu == PROCESSOR_PPCE5500)
4194 if (TARGET_ALTIVEC)
4195 error ("AltiVec not supported in this target");
4198 /* If we are optimizing big endian systems for space, use the load/store
4199 multiple and string instructions. */
4200 if (BYTES_BIG_ENDIAN && optimize_size)
4201 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
4202 | OPTION_MASK_STRING);
4204 /* Don't allow -mmultiple or -mstring on little endian systems
4205 unless the cpu is a 750, because the hardware doesn't support the
4206 instructions used in little endian mode, and they cause an alignment
4207 trap. The 750 does not cause an alignment trap (except when the
4208 target is unaligned). */
4210 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
4212 if (TARGET_MULTIPLE)
4214 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4215 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4216 warning (0, "%qs is not supported on little endian systems",
4217 "-mmultiple");
4220 if (TARGET_STRING)
4222 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4223 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
4224 warning (0, "%qs is not supported on little endian systems",
4225 "-mstring");
4229 /* If little-endian, default to -mstrict-align on older processors.
4230 Testing for htm matches power8 and later. */
4231 if (!BYTES_BIG_ENDIAN
4232 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4233 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4235 /* -maltivec={le,be} implies -maltivec. */
4236 if (rs6000_altivec_element_order != 0)
4237 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
4239 /* Disallow -maltivec=le in big endian mode for now. This is not
4240 known to be useful for anyone. */
4241 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
4243 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
4244 rs6000_altivec_element_order = 0;
4247 if (!rs6000_fold_gimple)
4248 fprintf (stderr,
4249 "gimple folding of rs6000 builtins has been disabled.\n");
4251 /* Add some warnings for VSX. */
4252 if (TARGET_VSX)
4254 const char *msg = NULL;
4255 if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
4257 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4258 msg = N_("-mvsx requires hardware floating point");
4259 else
4261 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4262 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4265 else if (TARGET_PAIRED_FLOAT)
4266 msg = N_("-mvsx and -mpaired are incompatible");
4267 else if (TARGET_AVOID_XFORM > 0)
4268 msg = N_("-mvsx needs indexed addressing");
4269 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4270 & OPTION_MASK_ALTIVEC))
4272 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4273 msg = N_("-mvsx and -mno-altivec are incompatible");
4274 else
4275 msg = N_("-mno-altivec disables vsx");
4278 if (msg)
4280 warning (0, msg);
4281 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4282 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4286 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4287 the -mcpu setting to enable options that conflict. */
4288 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4289 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4290 | OPTION_MASK_ALTIVEC
4291 | OPTION_MASK_VSX)) != 0)
4292 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4293 | OPTION_MASK_DIRECT_MOVE)
4294 & ~rs6000_isa_flags_explicit);
4296 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4297 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4299 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4300 off all of the options that depend on those flags. */
4301 ignore_masks = rs6000_disable_incompatible_switches ();
4303 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4304 unless the user explicitly used the -mno-<option> to disable the code. */
4305 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4306 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4307 else if (TARGET_P9_MINMAX)
4309 if (cpu_index >= 0)
4311 if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
4313 /* legacy behavior: allow -mcpu=power9 with certain
4314 capabilities explicitly disabled. */
4315 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4317 else
4318 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4319 "for <xxx> less than power9", "-mcpu");
4321 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4322 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4323 & rs6000_isa_flags_explicit))
4324 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4325 were explicitly cleared. */
4326 error ("%qs incompatible with explicitly disabled options",
4327 "-mpower9-minmax");
4328 else
4329 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4331 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4332 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4333 else if (TARGET_VSX)
4334 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4335 else if (TARGET_POPCNTD)
4336 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4337 else if (TARGET_DFP)
4338 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4339 else if (TARGET_CMPB)
4340 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4341 else if (TARGET_FPRND)
4342 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4343 else if (TARGET_POPCNTB)
4344 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4345 else if (TARGET_ALTIVEC)
4346 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
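/* The cascade above means a single newer switch pulls in its older
   prerequisites: for example, plain "-mvsx" also turns on everything in
   ISA_2_6_MASKS_SERVER (the ISA 2.06 server set) except bits the user
   explicitly disabled, which rs6000_disable_incompatible_switches has
   already folded into ignore_masks.  */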
4348 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4350 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4351 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4352 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4355 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4357 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4358 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4359 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4362 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4364 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4365 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4366 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4369 if (TARGET_P8_VECTOR && !TARGET_VSX)
4371 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4372 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4373 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4374 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4376 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4377 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4378 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4380 else
4382 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4383 not explicit. */
4384 rs6000_isa_flags |= OPTION_MASK_VSX;
4385 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4389 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4391 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4392 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4393 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4396 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4397 silently turn off quad memory mode. */
4398 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4400 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4401 warning (0, N_("-mquad-memory requires 64-bit mode"));
4403 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4404 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4406 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4407 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4410 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4411 the words are reversed, but atomic operations can still be done by
4412 swapping the words. */
4413 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4415 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4416 warning (0, N_("-mquad-memory is not available in little endian "
4417 "mode"));
4419 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4422 /* Assume that if the user asked for normal quad memory instructions, they
4423 want the atomic versions as well, unless they explicitly told us not to use
4424 quad word atomic instructions. */
4425 if (TARGET_QUAD_MEMORY
4426 && !TARGET_QUAD_MEMORY_ATOMIC
4427 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4428 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4430 /* If we can shrink-wrap the TOC register save separately, then use
4431 -msave-toc-indirect unless explicitly disabled. */
4432 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4433 && flag_shrink_wrap_separate
4434 && optimize_function_for_speed_p (cfun))
4435 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4437 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4438 generating power8 instructions. */
4439 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4440 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4441 & OPTION_MASK_P8_FUSION);
4443 /* Setting additional fusion flags turns on base fusion. */
4444 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4446 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4448 if (TARGET_P8_FUSION_SIGN)
4449 error ("%qs requires %qs", "-mpower8-fusion-sign",
4450 "-mpower8-fusion");
4452 if (TARGET_TOC_FUSION)
4453 error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");
4455 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4457 else
4458 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4461 /* Power9 fusion is a superset of power8 fusion. */
4462 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4464 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4466 /* We prefer to not mention undocumented options in
4467 error messages. However, if users have managed to select
4468 power9-fusion without selecting power8-fusion, they
4469 already know about undocumented flags. */
4470 error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
4471 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4473 else
4474 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4477 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4478 generating power9 instructions. */
4479 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4480 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4481 & OPTION_MASK_P9_FUSION);
4483 /* Power8 does not fuse sign extended loads with the addis. If we are
4484 optimizing at high levels for speed, convert a sign extended load into a
4485 zero extending load, and an explicit sign extension. */
4486 if (TARGET_P8_FUSION
4487 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4488 && optimize_function_for_speed_p (cfun)
4489 && optimize >= 3)
4490 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4492 /* TOC fusion requires 64-bit and medium/large code model. */
4493 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4495 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4496 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4497 warning (0, N_("-mtoc-fusion requires 64-bit"));
4500 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4502 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4503 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4504 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4507 /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
4508 model. */
4509 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4510 && (TARGET_CMODEL != CMODEL_SMALL)
4511 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4512 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4514 /* ISA 3.0 vector instructions include ISA 2.07. */
4515 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4517 /* We prefer to not mention undocumented options in
4518 error messages. However, if users have managed to select
4519 power9-vector without selecting power8-vector, they
4520 already know about undocumented flags. */
4521 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4522 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4523 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4524 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4526 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4527 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4528 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4530 else
4532 /* OPTION_MASK_P9_VECTOR is explicit and
4533 OPTION_MASK_P8_VECTOR is not explicit. */
4534 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4535 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4539 /* Set -mallow-movmisalign explicitly to on if we have full ISA 2.07
4540 support. If we only have ISA 2.06 support, and the user did not specify
4541 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4542 but we don't enable the full vectorization support. */
4543 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4544 TARGET_ALLOW_MOVMISALIGN = 1;
4546 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4548 if (TARGET_ALLOW_MOVMISALIGN > 0
4549 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4550 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4552 TARGET_ALLOW_MOVMISALIGN = 0;
4555 /* Determine when unaligned vector accesses are permitted, and when
4556 they are preferred over masked Altivec loads. Note that if
4557 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4558 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4559 not true. */
4560 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4562 if (!TARGET_VSX)
4564 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4565 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4567 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4570 else if (!TARGET_ALLOW_MOVMISALIGN)
4572 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4573 error ("%qs requires %qs", "-mefficient-unaligned-vsx",
4574 "-mallow-movmisalign");
4576 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4580 /* Set long double size before the IEEE 128-bit tests. */
4581 if (!global_options_set.x_rs6000_long_double_type_size)
4583 if (main_target_opt != NULL
4584 && (main_target_opt->x_rs6000_long_double_type_size
4585 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4586 error ("target attribute or pragma changes long double size");
4587 else
4588 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4591 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4592 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4593 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4594 those systems will not pick up this default. Warn if the user changes the
4595 default unless -Wno-psabi. */
4596 if (!global_options_set.x_rs6000_ieeequad)
4597 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4599 else if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4601 static bool warned_change_long_double;
4602 if (!warned_change_long_double)
4604 warned_change_long_double = true;
4605 if (TARGET_IEEEQUAD)
4606 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4607 else
4608 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4612 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4613 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4614 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4615 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4616 the keyword as well as the type. */
4617 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4619 /* IEEE 128-bit floating point requires VSX support. */
4620 if (TARGET_FLOAT128_KEYWORD)
4622 if (!TARGET_VSX)
4624 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4625 error ("%qs requires VSX support", "-mfloat128");
4627 TARGET_FLOAT128_TYPE = 0;
4628 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4629 | OPTION_MASK_FLOAT128_HW);
4631 else if (!TARGET_FLOAT128_TYPE)
4633 TARGET_FLOAT128_TYPE = 1;
4634 warning (0, "The -mfloat128 option may not be fully supported");
4638 /* Enable the __float128 keyword under Linux by default. */
4639 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4640 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4641 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4643 /* If we are supporting the float128 type and have full ISA 3.0 support,
4644 enable -mfloat128-hardware by default. However, don't enable the
4645 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4646 because sometimes the compiler wants to put things in an integer
4647 container, and if we don't have __int128 support, it is impossible. */
4648 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4649 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4650 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4651 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
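/* With the defaults above on a 64-bit Linux VSX system, an assumed
   user-level example such as

     __float128 q = 1.0q;
     q = q * q + q;

   compiles without an explicit -mfloat128, and maps to the ISA 3.0
   hardware instructions when the cpu provides ISA_3_0_MASKS_IEEE;
   otherwise it falls back to software emulation.  */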
4653 if (TARGET_FLOAT128_HW
4654 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4656 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4657 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4659 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4662 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4664 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4665 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4667 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4670 /* Print the options after updating the defaults. */
4671 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4672 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4674 /* E500mc does "better" if we inline more aggressively. Respect the
4675 user's opinion, though. */
4676 if (rs6000_block_move_inline_limit == 0
4677 && (rs6000_cpu == PROCESSOR_PPCE500MC
4678 || rs6000_cpu == PROCESSOR_PPCE500MC64
4679 || rs6000_cpu == PROCESSOR_PPCE5500
4680 || rs6000_cpu == PROCESSOR_PPCE6500))
4681 rs6000_block_move_inline_limit = 128;
4683 /* store_one_arg depends on expand_block_move to handle at least the
4684 size of reg_parm_stack_space. */
4685 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4686 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4688 if (global_init_p)
4690 /* If the appropriate debug option is enabled, replace the target hooks
4691 with debug versions that call the real version and then print
4692 debugging information. */
4693 if (TARGET_DEBUG_COST)
4695 targetm.rtx_costs = rs6000_debug_rtx_costs;
4696 targetm.address_cost = rs6000_debug_address_cost;
4697 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4700 if (TARGET_DEBUG_ADDR)
4702 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4703 targetm.legitimize_address = rs6000_debug_legitimize_address;
4704 rs6000_secondary_reload_class_ptr
4705 = rs6000_debug_secondary_reload_class;
4706 targetm.secondary_memory_needed
4707 = rs6000_debug_secondary_memory_needed;
4708 targetm.can_change_mode_class
4709 = rs6000_debug_can_change_mode_class;
4710 rs6000_preferred_reload_class_ptr
4711 = rs6000_debug_preferred_reload_class;
4712 rs6000_legitimize_reload_address_ptr
4713 = rs6000_debug_legitimize_reload_address;
4714 rs6000_mode_dependent_address_ptr
4715 = rs6000_debug_mode_dependent_address;
4718 if (rs6000_veclibabi_name)
4720 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4721 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4722 else
4724 error ("unknown vectorization library ABI type (%qs) for "
4725 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4726 ret = false;
4731 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4732 target attribute or pragma which automatically enables both options,
4733 unless the altivec ABI was set. This is set by default for 64-bit, but
4734 not for 32-bit. */
4735 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4737 TARGET_FLOAT128_TYPE = 0;
4738 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4739 | OPTION_MASK_FLOAT128_KEYWORD)
4740 & ~rs6000_isa_flags_explicit);
4743 /* Enable Altivec ABI for AIX -maltivec. */
4744 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4746 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4747 error ("target attribute or pragma changes AltiVec ABI");
4748 else
4749 rs6000_altivec_abi = 1;
4752 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4753 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4754 be explicitly overridden in either case. */
4755 if (TARGET_ELF)
4757 if (!global_options_set.x_rs6000_altivec_abi
4758 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4760 if (main_target_opt != NULL
4761 && !main_target_opt->x_rs6000_altivec_abi)
4762 error ("target attribute or pragma changes AltiVec ABI");
4763 else
4764 rs6000_altivec_abi = 1;
4768 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4769 So far, the only darwin64 targets are also Mach-O. */
4770 if (TARGET_MACHO
4771 && DEFAULT_ABI == ABI_DARWIN
4772 && TARGET_64BIT)
4774 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4775 error ("target attribute or pragma changes darwin64 ABI");
4776 else
4778 rs6000_darwin64_abi = 1;
4779 /* Default to natural alignment, for better performance. */
4780 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4784 /* Place FP constants in the constant pool instead of TOC
4785 if section anchors are enabled. */
4786 if (flag_section_anchors
4787 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4788 TARGET_NO_FP_IN_TOC = 1;
4790 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4791 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4793 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4794 SUBTARGET_OVERRIDE_OPTIONS;
4795 #endif
4796 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4797 SUBSUBTARGET_OVERRIDE_OPTIONS;
4798 #endif
4799 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4800 SUB3TARGET_OVERRIDE_OPTIONS;
4801 #endif
4803 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4804 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4806 /* For the E500 family of cores, reset the single/double FP flags to let us
4807 check that they remain constant across attributes or pragmas. Also,
4808 clear a possible request for string instructions, which are not supported
4809 and which we might have silently enabled above for -Os. */
4811 switch (rs6000_cpu)
4813 case PROCESSOR_PPC8540:
4814 case PROCESSOR_PPC8548:
4815 case PROCESSOR_PPCE500MC:
4816 case PROCESSOR_PPCE500MC64:
4817 case PROCESSOR_PPCE5500:
4818 case PROCESSOR_PPCE6500:
4819 rs6000_single_float = 0;
4820 rs6000_double_float = 0;
4821 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4822 break;
4824 default:
4825 break;
4828 if (main_target_opt)
4830 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4831 error ("target attribute or pragma changes single precision floating "
4832 "point");
4833 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4834 error ("target attribute or pragma changes double precision floating "
4835 "point");
4838 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
4839 && rs6000_cpu != PROCESSOR_POWER5
4840 && rs6000_cpu != PROCESSOR_POWER6
4841 && rs6000_cpu != PROCESSOR_POWER7
4842 && rs6000_cpu != PROCESSOR_POWER8
4843 && rs6000_cpu != PROCESSOR_POWER9
4844 && rs6000_cpu != PROCESSOR_PPCA2
4845 && rs6000_cpu != PROCESSOR_CELL
4846 && rs6000_cpu != PROCESSOR_PPC476);
4847 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
4848 || rs6000_cpu == PROCESSOR_POWER5
4849 || rs6000_cpu == PROCESSOR_POWER7
4850 || rs6000_cpu == PROCESSOR_POWER8);
4851 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
4852 || rs6000_cpu == PROCESSOR_POWER5
4853 || rs6000_cpu == PROCESSOR_POWER6
4854 || rs6000_cpu == PROCESSOR_POWER7
4855 || rs6000_cpu == PROCESSOR_POWER8
4856 || rs6000_cpu == PROCESSOR_POWER9
4857 || rs6000_cpu == PROCESSOR_PPCE500MC
4858 || rs6000_cpu == PROCESSOR_PPCE500MC64
4859 || rs6000_cpu == PROCESSOR_PPCE5500
4860 || rs6000_cpu == PROCESSOR_PPCE6500);
4862 /* Allow debug switches to override the above settings. These are set to -1
4863 in rs6000.opt to indicate the user hasn't directly set the switch. */
4864 if (TARGET_ALWAYS_HINT >= 0)
4865 rs6000_always_hint = TARGET_ALWAYS_HINT;
4867 if (TARGET_SCHED_GROUPS >= 0)
4868 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4870 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4871 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4873 rs6000_sched_restricted_insns_priority
4874 = (rs6000_sched_groups ? 1 : 0);
4876 /* Handle -msched-costly-dep option. */
4877 rs6000_sched_costly_dep
4878 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4880 if (rs6000_sched_costly_dep_str)
4882 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4883 rs6000_sched_costly_dep = no_dep_costly;
4884 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4885 rs6000_sched_costly_dep = all_deps_costly;
4886 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4887 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4888 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4889 rs6000_sched_costly_dep = store_to_load_dep_costly;
4890 else
4891 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4892 atoi (rs6000_sched_costly_dep_str));
4895 /* Handle -minsert-sched-nops option. */
4896 rs6000_sched_insert_nops
4897 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4899 if (rs6000_sched_insert_nops_str)
4901 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4902 rs6000_sched_insert_nops = sched_finish_none;
4903 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4904 rs6000_sched_insert_nops = sched_finish_pad_groups;
4905 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4906 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4907 else
4908 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4909 atoi (rs6000_sched_insert_nops_str));
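/* For example, "-minsert-sched-nops=pad" pads dispatch groups with nops,
   and "-msched-costly-dep=store_to_load" treats only true store-to-load
   dependences as costly; a bare number handed to either option falls
   through to the atoi conversions above and is stored as the raw enum
   value.  */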
4912 /* Handle stack protector. */
4913 if (!global_options_set.x_rs6000_stack_protector_guard)
4914 #ifdef TARGET_THREAD_SSP_OFFSET
4915 rs6000_stack_protector_guard = SSP_TLS;
4916 #else
4917 rs6000_stack_protector_guard = SSP_GLOBAL;
4918 #endif
4920 #ifdef TARGET_THREAD_SSP_OFFSET
4921 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4922 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4923 #endif
4925 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4927 char *endp;
4928 const char *str = rs6000_stack_protector_guard_offset_str;
4930 errno = 0;
4931 long offset = strtol (str, &endp, 0);
4932 if (!*str || *endp || errno)
4933 error ("%qs is not a valid number in %qs", str,
4934 "-mstack-protector-guard-offset=");
4936 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4937 || (TARGET_64BIT && (offset & 3)))
4938 error ("%qs is not a valid offset in %qs", str,
4939 "-mstack-protector-guard-offset=");
4941 rs6000_stack_protector_guard_offset = offset;
4944 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4946 const char *str = rs6000_stack_protector_guard_reg_str;
4947 int reg = decode_reg_name (str);
4949 if (!IN_RANGE (reg, 1, 31))
4950 error ("%qs is not a valid base register in %qs", str,
4951 "-mstack-protector-guard-reg=");
4953 rs6000_stack_protector_guard_reg = reg;
4956 if (rs6000_stack_protector_guard == SSP_TLS
4957 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4958 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
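/* Putting the guard options together (assumed, illustrative values):

     -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 \
     -mstack-protector-guard-offset=0x28

   passes the checks above on a 64-bit target: r13 decodes to register 13,
   and 0x28 lies within [-0x8000, 0x7fff] and is a multiple of 4.  */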
4960 if (global_init_p)
4962 #ifdef TARGET_REGNAMES
4963 /* If the user desires alternate register names, copy in the
4964 alternate names now. */
4965 if (TARGET_REGNAMES)
4966 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4967 #endif
4969 /* Set aix_struct_return last, after the ABI is determined.
4970 If -maix-struct-return or -msvr4-struct-return was explicitly
4971 used, don't override with the ABI default. */
4972 if (!global_options_set.x_aix_struct_return)
4973 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4975 #if 0
4976 /* IBM XL compiler defaults to unsigned bitfields. */
4977 if (TARGET_XL_COMPAT)
4978 flag_signed_bitfields = 0;
4979 #endif
4981 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4982 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4984 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4986 /* We can only guarantee the availability of DI pseudo-ops when
4987 assembling for 64-bit targets. */
4988 if (!TARGET_64BIT)
4990 targetm.asm_out.aligned_op.di = NULL;
4991 targetm.asm_out.unaligned_op.di = NULL;
4995 /* Set branch target alignment, if not optimizing for size. */
4996 if (!optimize_size)
4998 /* Cell wants to be aligned 8-byte for dual issue. Titan wants to be
4999 aligned 8-byte to avoid misprediction by the branch predictor. */
5000 if (rs6000_cpu == PROCESSOR_TITAN
5001 || rs6000_cpu == PROCESSOR_CELL)
5003 if (align_functions <= 0)
5004 align_functions = 8;
5005 if (align_jumps <= 0)
5006 align_jumps = 8;
5007 if (align_loops <= 0)
5008 align_loops = 8;
5010 if (rs6000_align_branch_targets)
5012 if (align_functions <= 0)
5013 align_functions = 16;
5014 if (align_jumps <= 0)
5015 align_jumps = 16;
5016 if (align_loops <= 0)
5018 can_override_loop_align = 1;
5019 align_loops = 16;
5022 if (align_jumps_max_skip <= 0)
5023 align_jumps_max_skip = 15;
5024 if (align_loops_max_skip <= 0)
5025 align_loops_max_skip = 15;
5028 /* Arrange to save and restore machine status around nested functions. */
5029 init_machine_status = rs6000_init_machine_status;
5031 /* We should always be splitting complex arguments, but we can't break
5032 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
5033 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
5034 targetm.calls.split_complex_arg = NULL;
5036 /* The AIX and ELFv1 ABIs define standard function descriptors. */
5037 if (DEFAULT_ABI == ABI_AIX)
5038 targetm.calls.custom_function_descriptors = 0;
5041 /* Initialize rs6000_cost with the appropriate target costs. */
5042 if (optimize_size)
5043 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
5044 else
5045 switch (rs6000_cpu)
5047 case PROCESSOR_RS64A:
5048 rs6000_cost = &rs64a_cost;
5049 break;
5051 case PROCESSOR_MPCCORE:
5052 rs6000_cost = &mpccore_cost;
5053 break;
5055 case PROCESSOR_PPC403:
5056 rs6000_cost = &ppc403_cost;
5057 break;
5059 case PROCESSOR_PPC405:
5060 rs6000_cost = &ppc405_cost;
5061 break;
5063 case PROCESSOR_PPC440:
5064 rs6000_cost = &ppc440_cost;
5065 break;
5067 case PROCESSOR_PPC476:
5068 rs6000_cost = &ppc476_cost;
5069 break;
5071 case PROCESSOR_PPC601:
5072 rs6000_cost = &ppc601_cost;
5073 break;
5075 case PROCESSOR_PPC603:
5076 rs6000_cost = &ppc603_cost;
5077 break;
5079 case PROCESSOR_PPC604:
5080 rs6000_cost = &ppc604_cost;
5081 break;
5083 case PROCESSOR_PPC604e:
5084 rs6000_cost = &ppc604e_cost;
5085 break;
5087 case PROCESSOR_PPC620:
5088 rs6000_cost = &ppc620_cost;
5089 break;
5091 case PROCESSOR_PPC630:
5092 rs6000_cost = &ppc630_cost;
5093 break;
5095 case PROCESSOR_CELL:
5096 rs6000_cost = &ppccell_cost;
5097 break;
5099 case PROCESSOR_PPC750:
5100 case PROCESSOR_PPC7400:
5101 rs6000_cost = &ppc750_cost;
5102 break;
5104 case PROCESSOR_PPC7450:
5105 rs6000_cost = &ppc7450_cost;
5106 break;
5108 case PROCESSOR_PPC8540:
5109 case PROCESSOR_PPC8548:
5110 rs6000_cost = &ppc8540_cost;
5111 break;
5113 case PROCESSOR_PPCE300C2:
5114 case PROCESSOR_PPCE300C3:
5115 rs6000_cost = &ppce300c2c3_cost;
5116 break;
5118 case PROCESSOR_PPCE500MC:
5119 rs6000_cost = &ppce500mc_cost;
5120 break;
5122 case PROCESSOR_PPCE500MC64:
5123 rs6000_cost = &ppce500mc64_cost;
5124 break;
5126 case PROCESSOR_PPCE5500:
5127 rs6000_cost = &ppce5500_cost;
5128 break;
5130 case PROCESSOR_PPCE6500:
5131 rs6000_cost = &ppce6500_cost;
5132 break;
5134 case PROCESSOR_TITAN:
5135 rs6000_cost = &titan_cost;
5136 break;
5138 case PROCESSOR_POWER4:
5139 case PROCESSOR_POWER5:
5140 rs6000_cost = &power4_cost;
5141 break;
5143 case PROCESSOR_POWER6:
5144 rs6000_cost = &power6_cost;
5145 break;
5147 case PROCESSOR_POWER7:
5148 rs6000_cost = &power7_cost;
5149 break;
5151 case PROCESSOR_POWER8:
5152 rs6000_cost = &power8_cost;
5153 break;
5155 case PROCESSOR_POWER9:
5156 rs6000_cost = &power9_cost;
5157 break;
5159 case PROCESSOR_PPCA2:
5160 rs6000_cost = &ppca2_cost;
5161 break;
5163 default:
5164 gcc_unreachable ();
5167 if (global_init_p)
5169 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5170 rs6000_cost->simultaneous_prefetches,
5171 global_options.x_param_values,
5172 global_options_set.x_param_values);
5173 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5174 global_options.x_param_values,
5175 global_options_set.x_param_values);
5176 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5177 rs6000_cost->cache_line_size,
5178 global_options.x_param_values,
5179 global_options_set.x_param_values);
5180 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5181 global_options.x_param_values,
5182 global_options_set.x_param_values);
5184 /* Increase loop peeling limits based on performance analysis. */
5185 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5186 global_options.x_param_values,
5187 global_options_set.x_param_values);
5188 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5189 global_options.x_param_values,
5190 global_options_set.x_param_values);
5192 /* Use the 'model' -fsched-pressure algorithm by default. */
5193 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5194 SCHED_PRESSURE_MODEL,
5195 global_options.x_param_values,
5196 global_options_set.x_param_values);
5198 /* If using typedef char *va_list, signal that
5199 __builtin_va_start (&ap, 0) can be optimized to
5200 ap = __builtin_next_arg (0). */
5201 if (DEFAULT_ABI != ABI_V4)
5202 targetm.expand_builtin_va_start = NULL;
5205 /* Set up single/double float flags.
5206 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
5207 then set both flags. */
5208 if (TARGET_HARD_FLOAT && rs6000_single_float == 0 && rs6000_double_float == 0)
5209 rs6000_single_float = rs6000_double_float = 1;
5211 /* If not explicitly specified via option, decide whether to generate indexed
5212 load/store instructions. A value of -1 indicates that the
5213 initial value of this variable has not been overwritten. During
5214 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
5215 if (TARGET_AVOID_XFORM == -1)
5216 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5217 DERAT mispredict penalty. However the LVE and STVE altivec instructions
5218 need indexed accesses and the type used is the scalar type of the element
5219 being loaded or stored. */
5220 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
5221 && !TARGET_ALTIVEC);
5223 /* Set the -mrecip options. */
5224 if (rs6000_recip_name)
5226 char *p = ASTRDUP (rs6000_recip_name);
5227 char *q;
5228 unsigned int mask, i;
5229 bool invert;
5231 while ((q = strtok (p, ",")) != NULL)
5233 p = NULL;
5234 if (*q == '!')
5236 invert = true;
5237 q++;
5239 else
5240 invert = false;
5242 if (!strcmp (q, "default"))
5243 mask = ((TARGET_RECIP_PRECISION)
5244 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5245 else
5247 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5248 if (!strcmp (q, recip_options[i].string))
5250 mask = recip_options[i].mask;
5251 break;
5254 if (i == ARRAY_SIZE (recip_options))
5256 error ("unknown option for %<%s=%s%>", "-mrecip", q);
5257 invert = false;
5258 mask = 0;
5259 ret = false;
5263 if (invert)
5264 rs6000_recip_control &= ~mask;
5265 else
5266 rs6000_recip_control |= mask;
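/* Worked example of the -mrecip parsing above (hypothetical option
   string): for -mrecip=default,!divd the first token sets MASK to
   RECIP_HIGH_PRECISION or RECIP_LOW_PRECISION and ORs it into
   rs6000_recip_control; the second token, assuming "divd" appears in
   recip_options, has INVERT set and clears that entry's bits again.  */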
5270 /* Set the builtin mask for the various options that could affect which
5271 builtins are enabled. In the past we used target_flags, but we've run out
5272 of bits, and some options like PAIRED are no longer in target_flags. */
5273 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5274 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5275 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5276 rs6000_builtin_mask);
5278 /* Initialize all of the registers. */
5279 rs6000_init_hard_regno_mode_ok (global_init_p);
5281 /* Save the initial options in case the user uses function-specific options. */
5282 if (global_init_p)
5283 target_option_default_node = target_option_current_node
5284 = build_target_option_node (&global_options);
5286 /* If not explicitly specified via option, decide whether to generate the
5287 extra blr's required to preserve the link stack on some cpus (e.g., the 476). */
5288 if (TARGET_LINK_STACK == -1)
5289 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
5291 return ret;
5294 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5295 define the target cpu type. */
5297 static void
5298 rs6000_option_override (void)
5300 (void) rs6000_option_override_internal (true);
5304 /* Implement targetm.vectorize.builtin_mask_for_load. */
5305 static tree
5306 rs6000_builtin_mask_for_load (void)
5308 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5309 if ((TARGET_ALTIVEC && !TARGET_VSX)
5310 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5311 return altivec_builtin_mask_for_load;
5312 else
5313 return 0;
5316 /* Implement LOOP_ALIGN. */
5317 int
5318 rs6000_loop_align (rtx label)
5320 basic_block bb;
5321 int ninsns;
5323 /* Don't override loop alignment if -falign-loops was specified. */
5324 if (!can_override_loop_align)
5325 return align_loops_log;
5327 bb = BLOCK_FOR_INSN (label);
5328 ninsns = num_loop_insns(bb->loop_father);
5330 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
5331 if (ninsns > 4 && ninsns <= 8
5332 && (rs6000_cpu == PROCESSOR_POWER4
5333 || rs6000_cpu == PROCESSOR_POWER5
5334 || rs6000_cpu == PROCESSOR_POWER6
5335 || rs6000_cpu == PROCESSOR_POWER7
5336 || rs6000_cpu == PROCESSOR_POWER8
5337 || rs6000_cpu == PROCESSOR_POWER9))
5338 return 5;
5339 else
5340 return align_loops_log;
5343 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5344 static int
5345 rs6000_loop_align_max_skip (rtx_insn *label)
5347 return (1 << rs6000_loop_align (label)) - 1;
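/* Worked example: a loop of 6 instructions compiled for
   PROCESSOR_POWER8 without an explicit -falign-loops gets
   rs6000_loop_align () == 5, i.e. a 2**5 = 32-byte alignment, and
   rs6000_loop_align_max_skip allows (1 << 5) - 1 = 31 bytes of
   padding to reach it.  */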
5350 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5351 after applying N iterations.  This routine does not determine
5352 how many iterations are required to reach the desired alignment. */
5354 static bool
5355 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5357 if (is_packed)
5358 return false;
5360 if (TARGET_32BIT)
5362 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5363 return true;
5365 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5366 return true;
5368 return false;
5370 else
5372 if (TARGET_MACHO)
5373 return false;
5375 /* Assume that all other types are naturally aligned. CHECKME! */
5376 return true;
5380 /* Return true if the vector misalignment factor is supported by the
5381 target. */
5382 static bool
5383 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5384 const_tree type,
5385 int misalignment,
5386 bool is_packed)
5388 if (TARGET_VSX)
5390 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5391 return true;
5393 /* Return false if the movmisalign pattern is not supported for this mode. */
5394 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5395 return false;
5397 if (misalignment == -1)
5399 /* Misalignment factor is unknown at compile time but we know
5400 it's word aligned. */
5401 if (rs6000_vector_alignment_reachable (type, is_packed))
5403 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5405 if (element_size == 64 || element_size == 32)
5406 return true;
5409 return false;
5412 /* VSX supports word-aligned vector accesses. */
5413 if (misalignment % 4 == 0)
5414 return true;
5416 return false;
5419 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5420 static int
5421 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5422 tree vectype, int misalign)
5424 unsigned elements;
5425 tree elem_type;
5427 switch (type_of_cost)
5429 case scalar_stmt:
5430 case scalar_load:
5431 case scalar_store:
5432 case vector_stmt:
5433 case vector_load:
5434 case vector_store:
5435 case vec_to_scalar:
5436 case scalar_to_vec:
5437 case cond_branch_not_taken:
5438 return 1;
5440 case vec_perm:
5441 if (TARGET_VSX)
5442 return 3;
5443 else
5444 return 1;
5446 case vec_promote_demote:
5447 if (TARGET_VSX)
5448 return 4;
5449 else
5450 return 1;
5452 case cond_branch_taken:
5453 return 3;
5455 case unaligned_load:
5456 case vector_gather_load:
5457 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5458 return 1;
5460 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5462 elements = TYPE_VECTOR_SUBPARTS (vectype);
5463 if (elements == 2)
5464 /* Double word aligned. */
5465 return 2;
5467 if (elements == 4)
5469 switch (misalign)
5471 case 8:
5472 /* Double word aligned. */
5473 return 2;
5475 case -1:
5476 /* Unknown misalignment. */
5477 case 4:
5478 case 12:
5479 /* Word aligned. */
5480 return 22;
5482 default:
5483 gcc_unreachable ();
5488 if (TARGET_ALTIVEC)
5489 /* Misaligned loads are not supported. */
5490 gcc_unreachable ();
5492 return 2;
5494 case unaligned_store:
5495 case vector_scatter_store:
5496 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5497 return 1;
5499 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5501 elements = TYPE_VECTOR_SUBPARTS (vectype);
5502 if (elements == 2)
5503 /* Double word aligned. */
5504 return 2;
5506 if (elements == 4)
5508 switch (misalign)
5510 case 8:
5511 /* Double word aligned. */
5512 return 2;
5514 case -1:
5515 /* Unknown misalignment. */
5516 case 4:
5517 case 12:
5518 /* Word aligned. */
5519 return 23;
5521 default:
5522 gcc_unreachable ();
5527 if (TARGET_ALTIVEC)
5528 /* Misaligned stores are not supported. */
5529 gcc_unreachable ();
5531 return 2;
5533 case vec_construct:
5534 /* This is a rough approximation assuming non-constant elements
5535 constructed into a vector via element insertion. FIXME:
5536 vec_construct is not granular enough for uniformly good
5537 decisions. If the initialization is a splat, this is
5538 cheaper than we estimate. Improve this someday. */
5539 elem_type = TREE_TYPE (vectype);
5540 /* 32-bit float values loaded into registers are stored as double
5541 precision, so we need 2 permutes, 2 converts, and 1 merge
5542 to construct a vector of short floats from them. */
5543 if (SCALAR_FLOAT_TYPE_P (elem_type)
5544 && TYPE_PRECISION (elem_type) == 32)
5545 return 5;
5546 /* On POWER9, integer vector types are built up in GPRs and then
5547 use a direct move (2 cycles). For POWER8 this is even worse,
5548 as we need two direct moves and a merge, and the direct moves
5549 are five cycles. */
5550 else if (INTEGRAL_TYPE_P (elem_type))
5552 if (TARGET_P9_VECTOR)
5553 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5554 else
5555 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5557 else
5558 /* V2DFmode doesn't need a direct move. */
5559 return 2;
5561 default:
5562 gcc_unreachable ();
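/* Worked example of the cost table above: an unaligned_load of a V4SF
   vector with misalign 4 (word aligned) on a VSX target that allows
   movmisalign but lacks TARGET_EFFICIENT_UNALIGNED_VSX is charged 22,
   while the same load with efficient unaligned VSX (e.g. -mcpu=power9)
   is charged 1.  */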
5566 /* Implement targetm.vectorize.preferred_simd_mode. */
5568 static machine_mode
5569 rs6000_preferred_simd_mode (scalar_mode mode)
5571 if (TARGET_VSX)
5572 switch (mode)
5574 case E_DFmode:
5575 return V2DFmode;
5576 default:;
5578 if (TARGET_ALTIVEC || TARGET_VSX)
5579 switch (mode)
5581 case E_SFmode:
5582 return V4SFmode;
5583 case E_TImode:
5584 return V1TImode;
5585 case E_DImode:
5586 return V2DImode;
5587 case E_SImode:
5588 return V4SImode;
5589 case E_HImode:
5590 return V8HImode;
5591 case E_QImode:
5592 return V16QImode;
5593 default:;
5595 if (TARGET_PAIRED_FLOAT
5596 && mode == SFmode)
5597 return V2SFmode;
5598 return word_mode;
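/* Summary of the preferences above: DFmode maps to V2DFmode when VSX
   is available; SFmode, TImode, DImode, SImode, HImode and QImode map
   to the corresponding 128-bit Altivec/VSX modes; SFmode maps to
   V2SFmode only with paired float (when Altivec/VSX are absent); and
   anything unhandled falls back to word_mode, i.e. no vector
   preference.  */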
5601 typedef struct _rs6000_cost_data
5603 struct loop *loop_info;
5604 unsigned cost[3];
5605 } rs6000_cost_data;
5607 /* Test for likely overcommitment of vector hardware resources. If a
5608 loop iteration is relatively large, and too large a percentage of
5609 instructions in the loop are vectorized, the cost model may not
5610 adequately reflect delays from unavailable vector resources.
5611 Penalize the loop body cost for this case. */
5613 static void
5614 rs6000_density_test (rs6000_cost_data *data)
5616 const int DENSITY_PCT_THRESHOLD = 85;
5617 const int DENSITY_SIZE_THRESHOLD = 70;
5618 const int DENSITY_PENALTY = 10;
5619 struct loop *loop = data->loop_info;
5620 basic_block *bbs = get_loop_body (loop);
5621 int nbbs = loop->num_nodes;
5622 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5623 int i, density_pct;
5625 for (i = 0; i < nbbs; i++)
5627 basic_block bb = bbs[i];
5628 gimple_stmt_iterator gsi;
5630 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5632 gimple *stmt = gsi_stmt (gsi);
5633 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5635 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5636 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5637 not_vec_cost++;
5641 free (bbs);
5642 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5644 if (density_pct > DENSITY_PCT_THRESHOLD
5645 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5647 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5648 if (dump_enabled_p ())
5649 dump_printf_loc (MSG_NOTE, vect_location,
5650 "density %d%%, cost %d exceeds threshold, penalizing "
5651 "loop body cost by %d%%", density_pct,
5652 vec_cost + not_vec_cost, DENSITY_PENALTY);
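/* Numeric sketch of the density test: with vec_cost = 90 and
   not_vec_cost = 10, density_pct is 90 and the loop body holds 100
   cost units, so both thresholds are exceeded and the body cost is
   inflated to 90 * (100 + 10) / 100 = 99.  */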
5656 /* Implement targetm.vectorize.init_cost. */
5658 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5659 instruction is needed by the vectorization. */
5660 static bool rs6000_vect_nonmem;
5662 static void *
5663 rs6000_init_cost (struct loop *loop_info)
5665 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5666 data->loop_info = loop_info;
5667 data->cost[vect_prologue] = 0;
5668 data->cost[vect_body] = 0;
5669 data->cost[vect_epilogue] = 0;
5670 rs6000_vect_nonmem = false;
5671 return data;
5674 /* Implement targetm.vectorize.add_stmt_cost. */
5676 static unsigned
5677 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5678 struct _stmt_vec_info *stmt_info, int misalign,
5679 enum vect_cost_model_location where)
5681 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5682 unsigned retval = 0;
5684 if (flag_vect_cost_model)
5686 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5687 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5688 misalign);
5689 /* Statements in an inner loop relative to the loop being
5690 vectorized are weighted more heavily. The value here is
5691 arbitrary and could potentially be improved with analysis. */
5692 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5693 count *= 50; /* FIXME. */
5695 retval = (unsigned) (count * stmt_cost);
5696 cost_data->cost[where] += retval;
5698 /* Check whether we're doing something other than just a copy loop.
5699 Not all such loops may be profitably vectorized; see
5700 rs6000_finish_cost. */
5701 if ((kind == vec_to_scalar || kind == vec_perm
5702 || kind == vec_promote_demote || kind == vec_construct
5703 || kind == scalar_to_vec)
5704 || (where == vect_body && kind == vector_stmt))
5705 rs6000_vect_nonmem = true;
5708 return retval;
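/* For example, with the cost model enabled, a vector_stmt of cost 1
   appearing once in the body of a loop nested inside the loop being
   vectorized contributes 1 * 50 = 50 units here, because of the
   inner-loop weighting above.  */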
5711 /* Implement targetm.vectorize.finish_cost. */
5713 static void
5714 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5715 unsigned *body_cost, unsigned *epilogue_cost)
5717 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5719 if (cost_data->loop_info)
5720 rs6000_density_test (cost_data);
5722 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5723 that require versioning for any reason. The vectorization is at
5724 best a wash inside the loop, and the versioning checks make
5725 profitability highly unlikely and potentially quite harmful. */
5726 if (cost_data->loop_info)
5728 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5729 if (!rs6000_vect_nonmem
5730 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5731 && LOOP_REQUIRES_VERSIONING (vec_info))
5732 cost_data->cost[vect_body] += 10000;
5735 *prologue_cost = cost_data->cost[vect_prologue];
5736 *body_cost = cost_data->cost[vect_body];
5737 *epilogue_cost = cost_data->cost[vect_epilogue];
5740 /* Implement targetm.vectorize.destroy_cost_data. */
5742 static void
5743 rs6000_destroy_cost_data (void *data)
5745 free (data);
5748 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5749 library with vectorized intrinsics. */
5751 static tree
5752 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5753 tree type_in)
5755 char name[32];
5756 const char *suffix = NULL;
5757 tree fntype, new_fndecl, bdecl = NULL_TREE;
5758 int n_args = 1;
5759 const char *bname;
5760 machine_mode el_mode, in_mode;
5761 int n, in_n;
5763 /* Libmass is suitable only for unsafe math, as it does not correctly
5764 support parts of IEEE (such as denormals) at the required precision.
5765 Only support it if we have VSX to use the simd d2 or f4 functions.
5766 XXX: Add variable length support. */
5767 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5768 return NULL_TREE;
5770 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5771 n = TYPE_VECTOR_SUBPARTS (type_out);
5772 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5773 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5774 if (el_mode != in_mode
5775 || n != in_n)
5776 return NULL_TREE;
5778 switch (fn)
5780 CASE_CFN_ATAN2:
5781 CASE_CFN_HYPOT:
5782 CASE_CFN_POW:
5783 n_args = 2;
5784 gcc_fallthrough ();
5786 CASE_CFN_ACOS:
5787 CASE_CFN_ACOSH:
5788 CASE_CFN_ASIN:
5789 CASE_CFN_ASINH:
5790 CASE_CFN_ATAN:
5791 CASE_CFN_ATANH:
5792 CASE_CFN_CBRT:
5793 CASE_CFN_COS:
5794 CASE_CFN_COSH:
5795 CASE_CFN_ERF:
5796 CASE_CFN_ERFC:
5797 CASE_CFN_EXP2:
5798 CASE_CFN_EXP:
5799 CASE_CFN_EXPM1:
5800 CASE_CFN_LGAMMA:
5801 CASE_CFN_LOG10:
5802 CASE_CFN_LOG1P:
5803 CASE_CFN_LOG2:
5804 CASE_CFN_LOG:
5805 CASE_CFN_SIN:
5806 CASE_CFN_SINH:
5807 CASE_CFN_SQRT:
5808 CASE_CFN_TAN:
5809 CASE_CFN_TANH:
5810 if (el_mode == DFmode && n == 2)
5812 bdecl = mathfn_built_in (double_type_node, fn);
5813 suffix = "d2"; /* pow -> powd2 */
5815 else if (el_mode == SFmode && n == 4)
5817 bdecl = mathfn_built_in (float_type_node, fn);
5818 suffix = "4"; /* powf -> powf4 */
5820 else
5821 return NULL_TREE;
5822 if (!bdecl)
5823 return NULL_TREE;
5824 break;
5826 default:
5827 return NULL_TREE;
5830 gcc_assert (suffix != NULL);
5831 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5832 if (!bname)
5833 return NULL_TREE;
5835 strcpy (name, bname + sizeof ("__builtin_") - 1);
5836 strcat (name, suffix);
5838 if (n_args == 1)
5839 fntype = build_function_type_list (type_out, type_in, NULL);
5840 else if (n_args == 2)
5841 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5842 else
5843 gcc_unreachable ();
5845 /* Build a function declaration for the vectorized function. */
5846 new_fndecl = build_decl (BUILTINS_LOCATION,
5847 FUNCTION_DECL, get_identifier (name), fntype);
5848 TREE_PUBLIC (new_fndecl) = 1;
5849 DECL_EXTERNAL (new_fndecl) = 1;
5850 DECL_IS_NOVOPS (new_fndecl) = 1;
5851 TREE_READONLY (new_fndecl) = 1;
5853 return new_fndecl;
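/* Name-construction sketch: vectorizing pow for V2DFmode picks
   bdecl = __builtin_pow, so bname is "__builtin_pow"; stripping the
   "__builtin_" prefix and appending "d2" yields the MASS routine name
   "powd2", and the decl built above has type V2DF (*) (V2DF, V2DF)
   since n_args == 2.  */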
5856 /* Returns a function decl for a vectorized version of the builtin function
5857 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5858 if it is not available. */
5860 static tree
5861 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5862 tree type_in)
5864 machine_mode in_mode, out_mode;
5865 int in_n, out_n;
5867 if (TARGET_DEBUG_BUILTIN)
5868 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5869 combined_fn_name (combined_fn (fn)),
5870 GET_MODE_NAME (TYPE_MODE (type_out)),
5871 GET_MODE_NAME (TYPE_MODE (type_in)));
5873 if (TREE_CODE (type_out) != VECTOR_TYPE
5874 || TREE_CODE (type_in) != VECTOR_TYPE)
5875 return NULL_TREE;
5877 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5878 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5879 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5880 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5882 switch (fn)
5884 CASE_CFN_COPYSIGN:
5885 if (VECTOR_UNIT_VSX_P (V2DFmode)
5886 && out_mode == DFmode && out_n == 2
5887 && in_mode == DFmode && in_n == 2)
5888 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5889 if (VECTOR_UNIT_VSX_P (V4SFmode)
5890 && out_mode == SFmode && out_n == 4
5891 && in_mode == SFmode && in_n == 4)
5892 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5893 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5894 && out_mode == SFmode && out_n == 4
5895 && in_mode == SFmode && in_n == 4)
5896 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5897 break;
5898 CASE_CFN_CEIL:
5899 if (VECTOR_UNIT_VSX_P (V2DFmode)
5900 && out_mode == DFmode && out_n == 2
5901 && in_mode == DFmode && in_n == 2)
5902 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5903 if (VECTOR_UNIT_VSX_P (V4SFmode)
5904 && out_mode == SFmode && out_n == 4
5905 && in_mode == SFmode && in_n == 4)
5906 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5907 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5908 && out_mode == SFmode && out_n == 4
5909 && in_mode == SFmode && in_n == 4)
5910 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5911 break;
5912 CASE_CFN_FLOOR:
5913 if (VECTOR_UNIT_VSX_P (V2DFmode)
5914 && out_mode == DFmode && out_n == 2
5915 && in_mode == DFmode && in_n == 2)
5916 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5917 if (VECTOR_UNIT_VSX_P (V4SFmode)
5918 && out_mode == SFmode && out_n == 4
5919 && in_mode == SFmode && in_n == 4)
5920 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5921 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5922 && out_mode == SFmode && out_n == 4
5923 && in_mode == SFmode && in_n == 4)
5924 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5925 break;
5926 CASE_CFN_FMA:
5927 if (VECTOR_UNIT_VSX_P (V2DFmode)
5928 && out_mode == DFmode && out_n == 2
5929 && in_mode == DFmode && in_n == 2)
5930 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5931 if (VECTOR_UNIT_VSX_P (V4SFmode)
5932 && out_mode == SFmode && out_n == 4
5933 && in_mode == SFmode && in_n == 4)
5934 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5935 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5936 && out_mode == SFmode && out_n == 4
5937 && in_mode == SFmode && in_n == 4)
5938 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5939 break;
5940 CASE_CFN_TRUNC:
5941 if (VECTOR_UNIT_VSX_P (V2DFmode)
5942 && out_mode == DFmode && out_n == 2
5943 && in_mode == DFmode && in_n == 2)
5944 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5945 if (VECTOR_UNIT_VSX_P (V4SFmode)
5946 && out_mode == SFmode && out_n == 4
5947 && in_mode == SFmode && in_n == 4)
5948 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5949 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5950 && out_mode == SFmode && out_n == 4
5951 && in_mode == SFmode && in_n == 4)
5952 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5953 break;
5954 CASE_CFN_NEARBYINT:
5955 if (VECTOR_UNIT_VSX_P (V2DFmode)
5956 && flag_unsafe_math_optimizations
5957 && out_mode == DFmode && out_n == 2
5958 && in_mode == DFmode && in_n == 2)
5959 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5960 if (VECTOR_UNIT_VSX_P (V4SFmode)
5961 && flag_unsafe_math_optimizations
5962 && out_mode == SFmode && out_n == 4
5963 && in_mode == SFmode && in_n == 4)
5964 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5965 break;
5966 CASE_CFN_RINT:
5967 if (VECTOR_UNIT_VSX_P (V2DFmode)
5968 && !flag_trapping_math
5969 && out_mode == DFmode && out_n == 2
5970 && in_mode == DFmode && in_n == 2)
5971 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5972 if (VECTOR_UNIT_VSX_P (V4SFmode)
5973 && !flag_trapping_math
5974 && out_mode == SFmode && out_n == 4
5975 && in_mode == SFmode && in_n == 4)
5976 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5977 break;
5978 default:
5979 break;
5982 /* Generate calls to libmass if appropriate. */
5983 if (rs6000_veclib_handler)
5984 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5986 return NULL_TREE;
5989 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5991 static tree
5992 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5993 tree type_in)
5995 machine_mode in_mode, out_mode;
5996 int in_n, out_n;
5998 if (TARGET_DEBUG_BUILTIN)
5999 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
6000 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
6001 GET_MODE_NAME (TYPE_MODE (type_out)),
6002 GET_MODE_NAME (TYPE_MODE (type_in)));
6004 if (TREE_CODE (type_out) != VECTOR_TYPE
6005 || TREE_CODE (type_in) != VECTOR_TYPE)
6006 return NULL_TREE;
6008 out_mode = TYPE_MODE (TREE_TYPE (type_out));
6009 out_n = TYPE_VECTOR_SUBPARTS (type_out);
6010 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6011 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6013 enum rs6000_builtins fn
6014 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
6015 switch (fn)
6017 case RS6000_BUILTIN_RSQRTF:
6018 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6019 && out_mode == SFmode && out_n == 4
6020 && in_mode == SFmode && in_n == 4)
6021 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
6022 break;
6023 case RS6000_BUILTIN_RSQRT:
6024 if (VECTOR_UNIT_VSX_P (V2DFmode)
6025 && out_mode == DFmode && out_n == 2
6026 && in_mode == DFmode && in_n == 2)
6027 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
6028 break;
6029 case RS6000_BUILTIN_RECIPF:
6030 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6031 && out_mode == SFmode && out_n == 4
6032 && in_mode == SFmode && in_n == 4)
6033 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
6034 break;
6035 case RS6000_BUILTIN_RECIP:
6036 if (VECTOR_UNIT_VSX_P (V2DFmode)
6037 && out_mode == DFmode && out_n == 2
6038 && in_mode == DFmode && in_n == 2)
6039 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
6040 break;
6041 default:
6042 break;
6044 return NULL_TREE;
6047 /* Default CPU string for rs6000*_file_start functions. */
6048 static const char *rs6000_default_cpu;
6050 /* Do anything needed at the start of the asm file. */
6052 static void
6053 rs6000_file_start (void)
6055 char buffer[80];
6056 const char *start = buffer;
6057 FILE *file = asm_out_file;
6059 rs6000_default_cpu = TARGET_CPU_DEFAULT;
6061 default_file_start ();
6063 if (flag_verbose_asm)
6065 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
6067 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6069 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6070 start = "";
6073 if (global_options_set.x_rs6000_cpu_index)
6075 fprintf (file, "%s -mcpu=%s", start,
6076 processor_target_table[rs6000_cpu_index].name);
6077 start = "";
6080 if (global_options_set.x_rs6000_tune_index)
6082 fprintf (file, "%s -mtune=%s", start,
6083 processor_target_table[rs6000_tune_index].name);
6084 start = "";
6087 if (PPC405_ERRATUM77)
6089 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6090 start = "";
6093 #ifdef USING_ELFOS_H
6094 switch (rs6000_sdata)
6096 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6097 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6098 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6099 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6102 if (rs6000_sdata && g_switch_value)
6104 fprintf (file, "%s -G %d", start,
6105 g_switch_value);
6106 start = "";
6108 #endif
6110 if (*start == '\0')
6111 putc ('\n', file);
6114 #ifdef USING_ELFOS_H
6115 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6116 && !global_options_set.x_rs6000_cpu_index)
6118 fputs ("\t.machine ", asm_out_file);
6119 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6120 fputs ("power9\n", asm_out_file);
6121 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6122 fputs ("power8\n", asm_out_file);
6123 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6124 fputs ("power7\n", asm_out_file);
6125 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6126 fputs ("power6\n", asm_out_file);
6127 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6128 fputs ("power5\n", asm_out_file);
6129 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6130 fputs ("power4\n", asm_out_file);
6131 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6132 fputs ("ppc64\n", asm_out_file);
6133 else
6134 fputs ("ppc\n", asm_out_file);
6136 #endif
6138 if (DEFAULT_ABI == ABI_ELFv2)
6139 fprintf (file, "\t.abiversion 2\n");
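/* Example of the .machine selection above (ELF targets): when neither
   a configured --with-cpu default nor an explicit -mcpu is in effect,
   the ISA flag bits decide the directive; e.g. OPTION_MASK_POPCNTD
   set (but not DIRECT_MOVE or MODULO) emits ".machine power7", and
   only OPTION_MASK_POWERPC64 set emits ".machine ppc64".  */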
6143 /* Return nonzero if this function is known to have a null epilogue. */
6145 int
6146 direct_return (void)
6148 if (reload_completed)
6150 rs6000_stack_t *info = rs6000_stack_info ();
6152 if (info->first_gp_reg_save == 32
6153 && info->first_fp_reg_save == 64
6154 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6155 && ! info->lr_save_p
6156 && ! info->cr_save_p
6157 && info->vrsave_size == 0
6158 && ! info->push_p)
6159 return 1;
6162 return 0;
6165 /* Return the number of instructions it takes to form a constant in an
6166 integer register. */
6168 int
6169 num_insns_constant_wide (HOST_WIDE_INT value)
6171 /* signed constant loadable with addi */
6172 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6173 return 1;
6175 /* constant loadable with addis */
6176 else if ((value & 0xffff) == 0
6177 && (value >> 31 == -1 || value >> 31 == 0))
6178 return 1;
6180 else if (TARGET_POWERPC64)
6182 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6183 HOST_WIDE_INT high = value >> 31;
6185 if (high == 0 || high == -1)
6186 return 2;
6188 high >>= 1;
6190 if (low == 0)
6191 return num_insns_constant_wide (high) + 1;
6192 else if (high == 0)
6193 return num_insns_constant_wide (low) + 1;
6194 else
6195 return (num_insns_constant_wide (high)
6196 + num_insns_constant_wide (low) + 1);
6199 else
6200 return 2;
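/* Worked example for the 64-bit path above: 0x123456789abcdef0 splits
   into low = 0xffffffff9abcdef0 (the sign-extended low word) and,
   after the extra shift, high = 0x12345678.  Neither half is zero and
   each takes 2 insns (lis + ori), so the total is 2 + 2 + 1 = 5,
   counting the shift that merges the halves.  */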
6203 int
6204 num_insns_constant (rtx op, machine_mode mode)
6206 HOST_WIDE_INT low, high;
6208 switch (GET_CODE (op))
6210 case CONST_INT:
6211 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6212 && rs6000_is_valid_and_mask (op, mode))
6213 return 2;
6214 else
6215 return num_insns_constant_wide (INTVAL (op));
6217 case CONST_WIDE_INT:
6219 int i;
6220 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6221 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6222 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6223 return ins;
6226 case CONST_DOUBLE:
6227 if (mode == SFmode || mode == SDmode)
6229 long l;
6231 if (DECIMAL_FLOAT_MODE_P (mode))
6232 REAL_VALUE_TO_TARGET_DECIMAL32
6233 (*CONST_DOUBLE_REAL_VALUE (op), l);
6234 else
6235 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6236 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6239 long l[2];
6240 if (DECIMAL_FLOAT_MODE_P (mode))
6241 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6242 else
6243 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6244 high = l[WORDS_BIG_ENDIAN == 0];
6245 low = l[WORDS_BIG_ENDIAN != 0];
6247 if (TARGET_32BIT)
6248 return (num_insns_constant_wide (low)
6249 + num_insns_constant_wide (high));
6250 else
6252 if ((high == 0 && low >= 0)
6253 || (high == -1 && low < 0))
6254 return num_insns_constant_wide (low);
6256 else if (rs6000_is_valid_and_mask (op, mode))
6257 return 2;
6259 else if (low == 0)
6260 return num_insns_constant_wide (high) + 1;
6262 else
6263 return (num_insns_constant_wide (high)
6264 + num_insns_constant_wide (low) + 1);
6267 default:
6268 gcc_unreachable ();
6272 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6273 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6274 corresponding element of the vector, but for V4SFmode and V2SFmode,
6275 the corresponding "float" is interpreted as an SImode integer. */
6277 HOST_WIDE_INT
6278 const_vector_elt_as_int (rtx op, unsigned int elt)
6280 rtx tmp;
6282 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6283 gcc_assert (GET_MODE (op) != V2DImode
6284 && GET_MODE (op) != V2DFmode);
6286 tmp = CONST_VECTOR_ELT (op, elt);
6287 if (GET_MODE (op) == V4SFmode
6288 || GET_MODE (op) == V2SFmode)
6289 tmp = gen_lowpart (SImode, tmp);
6290 return INTVAL (tmp);
6293 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6294 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6295 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6296 all items are set to the same value and contain COPIES replicas of the
6297 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6298 operand and the others are set to the value of the operand's msb. */
6300 static bool
6301 vspltis_constant (rtx op, unsigned step, unsigned copies)
6303 machine_mode mode = GET_MODE (op);
6304 machine_mode inner = GET_MODE_INNER (mode);
6306 unsigned i;
6307 unsigned nunits;
6308 unsigned bitsize;
6309 unsigned mask;
6311 HOST_WIDE_INT val;
6312 HOST_WIDE_INT splat_val;
6313 HOST_WIDE_INT msb_val;
6315 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6316 return false;
6318 nunits = GET_MODE_NUNITS (mode);
6319 bitsize = GET_MODE_BITSIZE (inner);
6320 mask = GET_MODE_MASK (inner);
6322 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6323 splat_val = val;
6324 msb_val = val >= 0 ? 0 : -1;
6326 /* Construct the value to be splatted, if possible. If not, return false. */
6327 for (i = 2; i <= copies; i *= 2)
6329 HOST_WIDE_INT small_val;
6330 bitsize /= 2;
6331 small_val = splat_val >> bitsize;
6332 mask >>= bitsize;
6333 if (splat_val != ((HOST_WIDE_INT)
6334 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6335 | (small_val & mask)))
6336 return false;
6337 splat_val = small_val;
6340 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6341 if (EASY_VECTOR_15 (splat_val))
6344 /* Also check if we can splat, and then add the result to itself. Do so if
6345 the value is positive, or if the splat instruction is using OP's mode;
6346 for splat_val < 0, the splat and the add should use the same mode. */
6347 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6348 && (splat_val >= 0 || (step == 1 && copies == 1)))
6351 /* Also check if we are loading up the most significant bit, which can be
6352 done by loading up -1 and shifting the value left by -1. */
6353 else if (EASY_VECTOR_MSB (splat_val, inner))
6356 else
6357 return false;
6359 /* Check if VAL is present in every STEP-th element, and the
6360 other elements are filled with its most significant bit. */
6361 for (i = 1; i < nunits; ++i)
6363 HOST_WIDE_INT desired_val;
6364 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6365 if ((i & (step - 1)) == 0)
6366 desired_val = val;
6367 else
6368 desired_val = msb_val;
6370 if (desired_val != const_vector_elt_as_int (op, elt))
6371 return false;
6374 return true;
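/* Worked example: the V8HImode constant with all eight elements equal
   to 0x0101 fails the vspltish check (257 is outside the
   EASY_VECTOR_15 range) but succeeds with STEP = 1, COPIES = 2: the
   folding loop halves the element to splat_val = 1, so the constant
   is reachable as vspltisb 1 viewed as V8HI.  */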
6377 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6378 instruction, filling in the bottom elements with 0 or -1.
6380 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6381 for the number of zeroes to shift in, or negative for the number of 0xff
6382 bytes to shift in.
6384 OP is a CONST_VECTOR. */
6386 int
6387 vspltis_shifted (rtx op)
6389 machine_mode mode = GET_MODE (op);
6390 machine_mode inner = GET_MODE_INNER (mode);
6392 unsigned i, j;
6393 unsigned nunits;
6394 unsigned mask;
6396 HOST_WIDE_INT val;
6398 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6399 return false;
6401 /* We need to create pseudo registers to do the shift, so don't recognize
6402 shift vector constants after reload. */
6403 if (!can_create_pseudo_p ())
6404 return false;
6406 nunits = GET_MODE_NUNITS (mode);
6407 mask = GET_MODE_MASK (inner);
6409 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6411 /* Check if the value can really be the operand of a vspltis[bhw]. */
6412 if (EASY_VECTOR_15 (val))
6415 /* Also check if we are loading up the most significant bit which can be done
6416 by loading up -1 and shifting the value left by -1. */
6417 else if (EASY_VECTOR_MSB (val, inner))
6420 else
6421 return 0;
6423 /* Check if VAL is present in every STEP-th element until we find elements
6424 that are 0 or all 1 bits. */
6425 for (i = 1; i < nunits; ++i)
6427 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6428 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6430 /* If the value isn't the splat value, check for the remaining elements
6431 being 0/-1. */
6432 if (val != elt_val)
6434 if (elt_val == 0)
6436 for (j = i+1; j < nunits; ++j)
6438 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6439 if (const_vector_elt_as_int (op, elt2) != 0)
6440 return 0;
6443 return (nunits - i) * GET_MODE_SIZE (inner);
6446 else if ((elt_val & mask) == mask)
6448 for (j = i+1; j < nunits; ++j)
6450 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6451 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6452 return 0;
6455 return -((nunits - i) * GET_MODE_SIZE (inner));
6458 else
6459 return 0;
6463 /* If all elements are equal, we don't need to do VSLDOI. */
6464 return 0;
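/* Worked example: the big-endian V4SImode constant { 5, 0, 0, 0 } has
   splat value 5 in element 0 followed by zeros, so the loop above
   returns (4 - 1) * 4 = 12, meaning: splat 5 with vspltisw, then
   shift in 12 bytes of zeroes with VSLDOI.  */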
6468 /* Return true if OP is of the given MODE and can be synthesized
6469 with a vspltisb, vspltish or vspltisw. */
6471 bool
6472 easy_altivec_constant (rtx op, machine_mode mode)
6474 unsigned step, copies;
6476 if (mode == VOIDmode)
6477 mode = GET_MODE (op);
6478 else if (mode != GET_MODE (op))
6479 return false;
6481 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6482 constants. */
6483 if (mode == V2DFmode)
6484 return zero_constant (op, mode);
6486 else if (mode == V2DImode)
6488 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6489 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6490 return false;
6492 if (zero_constant (op, mode))
6493 return true;
6495 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6496 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6497 return true;
6499 return false;
6502 /* V1TImode is a special container for TImode. Ignore for now. */
6503 else if (mode == V1TImode)
6504 return false;
6506 /* Start with a vspltisw. */
6507 step = GET_MODE_NUNITS (mode) / 4;
6508 copies = 1;
6510 if (vspltis_constant (op, step, copies))
6511 return true;
6513 /* Then try with a vspltish. */
6514 if (step == 1)
6515 copies <<= 1;
6516 else
6517 step >>= 1;
6519 if (vspltis_constant (op, step, copies))
6520 return true;
6522 /* And finally a vspltisb. */
6523 if (step == 1)
6524 copies <<= 1;
6525 else
6526 step >>= 1;
6528 if (vspltis_constant (op, step, copies))
6529 return true;
6531 if (vspltis_shifted (op) != 0)
6532 return true;
6534 return false;
6537 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6538 result is OP. Abort if it is not possible. */
6540 rtx
6541 gen_easy_altivec_constant (rtx op)
6543 machine_mode mode = GET_MODE (op);
6544 int nunits = GET_MODE_NUNITS (mode);
6545 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6546 unsigned step = nunits / 4;
6547 unsigned copies = 1;
6549 /* Start with a vspltisw. */
6550 if (vspltis_constant (op, step, copies))
6551 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6553 /* Then try with a vspltish. */
6554 if (step == 1)
6555 copies <<= 1;
6556 else
6557 step >>= 1;
6559 if (vspltis_constant (op, step, copies))
6560 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6562 /* And finally a vspltisb. */
6563 if (step == 1)
6564 copies <<= 1;
6565 else
6566 step >>= 1;
6568 if (vspltis_constant (op, step, copies))
6569 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6571 gcc_unreachable ();
6574 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6575 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6577 Return the number of instructions needed (1 or 2) via the address
6578 pointed to by NUM_INSNS_PTR.
6580 Return the constant that is being splatted via CONSTANT_PTR. */
6582 bool
6583 xxspltib_constant_p (rtx op,
6584 machine_mode mode,
6585 int *num_insns_ptr,
6586 int *constant_ptr)
6588 size_t nunits = GET_MODE_NUNITS (mode);
6589 size_t i;
6590 HOST_WIDE_INT value;
6591 rtx element;
6593 /* Set the returned values to out of bound values. */
6594 *num_insns_ptr = -1;
6595 *constant_ptr = 256;
6597 if (!TARGET_P9_VECTOR)
6598 return false;
6600 if (mode == VOIDmode)
6601 mode = GET_MODE (op);
6603 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6604 return false;
6606 /* Handle (vec_duplicate <constant>). */
6607 if (GET_CODE (op) == VEC_DUPLICATE)
6609 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6610 && mode != V2DImode)
6611 return false;
6613 element = XEXP (op, 0);
6614 if (!CONST_INT_P (element))
6615 return false;
6617 value = INTVAL (element);
6618 if (!IN_RANGE (value, -128, 127))
6619 return false;
6622 /* Handle (const_vector [...]). */
6623 else if (GET_CODE (op) == CONST_VECTOR)
6625 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6626 && mode != V2DImode)
6627 return false;
6629 element = CONST_VECTOR_ELT (op, 0);
6630 if (!CONST_INT_P (element))
6631 return false;
6633 value = INTVAL (element);
6634 if (!IN_RANGE (value, -128, 127))
6635 return false;
6637 for (i = 1; i < nunits; i++)
6639 element = CONST_VECTOR_ELT (op, i);
6640 if (!CONST_INT_P (element))
6641 return false;
6643 if (value != INTVAL (element))
6644 return false;
6648 /* Handle integer constants being loaded into the upper part of the VSX
6649 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6650 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6651 else if (CONST_INT_P (op))
6653 if (!SCALAR_INT_MODE_P (mode))
6654 return false;
6656 value = INTVAL (op);
6657 if (!IN_RANGE (value, -128, 127))
6658 return false;
6660 if (!IN_RANGE (value, -1, 0))
6662 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6663 return false;
6665 if (EASY_VECTOR_15 (value))
6666 return false;
6670 else
6671 return false;
6673 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6674 sign extend. Special case 0/-1 to allow getting any VSX register instead
6675 of an Altivec register. */
6676 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6677 && EASY_VECTOR_15 (value))
6678 return false;
6680 /* Return # of instructions and the constant byte for XXSPLTIB. */
6681 if (mode == V16QImode)
6682 *num_insns_ptr = 1;
6684 else if (IN_RANGE (value, -1, 0))
6685 *num_insns_ptr = 1;
6687 else
6688 *num_insns_ptr = 2;
6690 *constant_ptr = (int) value;
6691 return true;
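/* Illustration: a V16QImode splat of 100 is a single XXSPLTIB
   (*NUM_INSNS_PTR = 1); a V4SImode splat of 100 is reported as two
   instructions (XXSPLTIB plus a VEXTSB2W sign extension); and a
   V4SImode splat of 7 returns false because a single VSPLTISW is
   preferred.  */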
6694 const char *
6695 output_vec_const_move (rtx *operands)
6697 int shift;
6698 machine_mode mode;
6699 rtx dest, vec;
6701 dest = operands[0];
6702 vec = operands[1];
6703 mode = GET_MODE (dest);
6705 if (TARGET_VSX)
6707 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6708 int xxspltib_value = 256;
6709 int num_insns = -1;
6711 if (zero_constant (vec, mode))
6713 if (TARGET_P9_VECTOR)
6714 return "xxspltib %x0,0";
6716 else if (dest_vmx_p)
6717 return "vspltisw %0,0";
6719 else
6720 return "xxlxor %x0,%x0,%x0";
6723 if (all_ones_constant (vec, mode))
6725 if (TARGET_P9_VECTOR)
6726 return "xxspltib %x0,255";
6728 else if (dest_vmx_p)
6729 return "vspltisw %0,-1";
6731 else if (TARGET_P8_VECTOR)
6732 return "xxlorc %x0,%x0,%x0";
6734 else
6735 gcc_unreachable ();
6738 if (TARGET_P9_VECTOR
6739 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6741 if (num_insns == 1)
6743 operands[2] = GEN_INT (xxspltib_value & 0xff);
6744 return "xxspltib %x0,%2";
6747 return "#";
6751 if (TARGET_ALTIVEC)
6753 rtx splat_vec;
6755 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6756 if (zero_constant (vec, mode))
6757 return "vspltisw %0,0";
6759 if (all_ones_constant (vec, mode))
6760 return "vspltisw %0,-1";
6762 /* Do we need to construct a value using VSLDOI? */
6763 shift = vspltis_shifted (vec);
6764 if (shift != 0)
6765 return "#";
6767 splat_vec = gen_easy_altivec_constant (vec);
6768 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6769 operands[1] = XEXP (splat_vec, 0);
6770 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6771 return "#";
6773 switch (GET_MODE (splat_vec))
6775 case E_V4SImode:
6776 return "vspltisw %0,%1";
6778 case E_V8HImode:
6779 return "vspltish %0,%1";
6781 case E_V16QImode:
6782 return "vspltisb %0,%1";
6784 default:
6785 gcc_unreachable ();
6789 gcc_unreachable ();
6792 /* Initialize the paired-float vector TARGET to VALS. */
6794 void
6795 paired_expand_vector_init (rtx target, rtx vals)
6797 machine_mode mode = GET_MODE (target);
6798 int n_elts = GET_MODE_NUNITS (mode);
6799 int n_var = 0;
6800 rtx x, new_rtx, tmp, constant_op, op1, op2;
6801 int i;
6803 for (i = 0; i < n_elts; ++i)
6805 x = XVECEXP (vals, 0, i);
6806 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6807 ++n_var;
6809 if (n_var == 0)
6811 /* Load from constant pool. */
6812 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6813 return;
6816 if (n_var == 2)
6818 /* The vector is initialized only with non-constants. */
6819 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6820 XVECEXP (vals, 0, 1));
6822 emit_move_insn (target, new_rtx);
6823 return;
6826 /* One field is non-constant and the other one is a constant. Load the
6827 constant from the constant pool and use the ps_merge instruction to
6828 construct the whole vector. */
6829 op1 = XVECEXP (vals, 0, 0);
6830 op2 = XVECEXP (vals, 0, 1);
6832 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6834 tmp = gen_reg_rtx (GET_MODE (constant_op));
6835 emit_move_insn (tmp, constant_op);
6837 if (CONSTANT_P (op1))
6838 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6839 else
6840 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6842 emit_move_insn (target, new_rtx);
6845 void
6846 paired_expand_vector_move (rtx operands[])
6848 rtx op0 = operands[0], op1 = operands[1];
6850 emit_move_insn (op0, op1);
6853 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6854 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6855 operands for the relation operation COND. This is a recursive
6856 function. */
6858 static void
6859 paired_emit_vector_compare (enum rtx_code rcode,
6860 rtx dest, rtx op0, rtx op1,
6861 rtx cc_op0, rtx cc_op1)
6863 rtx tmp = gen_reg_rtx (V2SFmode);
6864 rtx tmp1, max, min;
6866 gcc_assert (TARGET_PAIRED_FLOAT);
6867 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6869 switch (rcode)
6871 case LT:
6872 case LTU:
6873 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6874 return;
6875 case GE:
6876 case GEU:
6877 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6878 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6879 return;
6880 case LE:
6881 case LEU:
6882 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6883 return;
6884 case GT:
6885 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6886 return;
6887 case EQ:
6888 tmp1 = gen_reg_rtx (V2SFmode);
6889 max = gen_reg_rtx (V2SFmode);
6890 min = gen_reg_rtx (V2SFmode);
6891 gen_reg_rtx (V2SFmode);
6893 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6894 emit_insn (gen_selv2sf4
6895 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6896 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6897 emit_insn (gen_selv2sf4
6898 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6899 emit_insn (gen_subv2sf3 (tmp1, min, max));
6900 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6901 return;
6902 case NE:
6903 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6904 return;
6905 case UNLE:
6906 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6907 return;
6908 case UNLT:
6909 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6910 return;
6911 case UNGE:
6912 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6913 return;
6914 case UNGT:
6915 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6916 return;
6917 default:
6918 gcc_unreachable ();
6921 return;
6924 /* Emit vector conditional expression.
6925 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6926 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6928 int
6929 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6930 rtx cond, rtx cc_op0, rtx cc_op1)
6932 enum rtx_code rcode = GET_CODE (cond);
6934 if (!TARGET_PAIRED_FLOAT)
6935 return 0;
6937 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6939 return 1;
6942 /* Initialize vector TARGET to VALS. */
6944 void
6945 rs6000_expand_vector_init (rtx target, rtx vals)
6947 machine_mode mode = GET_MODE (target);
6948 machine_mode inner_mode = GET_MODE_INNER (mode);
6949 int n_elts = GET_MODE_NUNITS (mode);
6950 int n_var = 0, one_var = -1;
6951 bool all_same = true, all_const_zero = true;
6952 rtx x, mem;
6953 int i;
6955 for (i = 0; i < n_elts; ++i)
6957 x = XVECEXP (vals, 0, i);
6958 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6959 ++n_var, one_var = i;
6960 else if (x != CONST0_RTX (inner_mode))
6961 all_const_zero = false;
6963 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6964 all_same = false;
6967 if (n_var == 0)
6969 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6970 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6971 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6973 /* Zero register. */
6974 emit_move_insn (target, CONST0_RTX (mode));
6975 return;
6977 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6979 /* Splat immediate. */
6980 emit_insn (gen_rtx_SET (target, const_vec));
6981 return;
6983 else
6985 /* Load from constant pool. */
6986 emit_move_insn (target, const_vec);
6987 return;
6991 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6992 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6994 rtx op[2];
6995 size_t i;
6996 size_t num_elements = all_same ? 1 : 2;
6997 for (i = 0; i < num_elements; i++)
6999 op[i] = XVECEXP (vals, 0, i);
7000 /* Just in case there is a SUBREG with a smaller mode, do a
7001 conversion. */
7002 if (GET_MODE (op[i]) != inner_mode)
7004 rtx tmp = gen_reg_rtx (inner_mode);
7005 convert_move (tmp, op[i], 0);
7006 op[i] = tmp;
7008 /* Allow load with splat double word. */
7009 else if (MEM_P (op[i]))
7011 if (!all_same)
7012 op[i] = force_reg (inner_mode, op[i]);
7014 else if (!REG_P (op[i]))
7015 op[i] = force_reg (inner_mode, op[i]);
7018 if (all_same)
7020 if (mode == V2DFmode)
7021 emit_insn (gen_vsx_splat_v2df (target, op[0]));
7022 else
7023 emit_insn (gen_vsx_splat_v2di (target, op[0]));
7025 else
7027 if (mode == V2DFmode)
7028 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
7029 else
7030 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
7032 return;
7035 /* Special case initializing vector int if we are on 64-bit systems with
7036 direct move or we have the ISA 3.0 instructions. */
7037 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
7038 && TARGET_DIRECT_MOVE_64BIT)
7040 if (all_same)
7042 rtx element0 = XVECEXP (vals, 0, 0);
7043 if (MEM_P (element0))
7044 element0 = rs6000_address_for_fpconvert (element0);
7045 else
7046 element0 = force_reg (SImode, element0);
7048 if (TARGET_P9_VECTOR)
7049 emit_insn (gen_vsx_splat_v4si (target, element0));
7050 else
7052 rtx tmp = gen_reg_rtx (DImode);
7053 emit_insn (gen_zero_extendsidi2 (tmp, element0));
7054 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
7056 return;
7058 else
7060 rtx elements[4];
7061 size_t i;
7063 for (i = 0; i < 4; i++)
7065 elements[i] = XVECEXP (vals, 0, i);
7066 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
7067 elements[i] = copy_to_mode_reg (SImode, elements[i]);
7070 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
7071 elements[2], elements[3]));
7072 return;
7076 /* With single-precision floating point on VSX, we know that internally
7077 single precision is actually represented as a double, so either make 2
7078 V2DF vectors and convert those vectors to single precision, or do one
7079 conversion and splat the result to the other elements. */
7080 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
7082 if (all_same)
7084 rtx element0 = XVECEXP (vals, 0, 0);
7086 if (TARGET_P9_VECTOR)
7088 if (MEM_P (element0))
7089 element0 = rs6000_address_for_fpconvert (element0);
7091 emit_insn (gen_vsx_splat_v4sf (target, element0));
7094 else
7096 rtx freg = gen_reg_rtx (V4SFmode);
7097 rtx sreg = force_reg (SFmode, element0);
7098 rtx cvt = (TARGET_XSCVDPSPN
7099 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
7100 : gen_vsx_xscvdpsp_scalar (freg, sreg));
7102 emit_insn (cvt);
7103 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
7104 const0_rtx));
7107 else
7109 rtx dbl_even = gen_reg_rtx (V2DFmode);
7110 rtx dbl_odd = gen_reg_rtx (V2DFmode);
7111 rtx flt_even = gen_reg_rtx (V4SFmode);
7112 rtx flt_odd = gen_reg_rtx (V4SFmode);
7113 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
7114 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
7115 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
7116 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
7118 /* Use VMRGEW if we can instead of doing a permute. */
7119 if (TARGET_P8_VECTOR)
7121 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
7122 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
7123 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7124 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7125 if (BYTES_BIG_ENDIAN)
7126 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
7127 else
7128 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
7130 else
7132 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
7133 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
7134 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7135 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7136 rs6000_expand_extract_even (target, flt_even, flt_odd);
7139 return;
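/* Worked example of the V4SFmode path above (illustrative): to build
   { a, b, c, d } when VMRGEW is available, the pairs { a, c } and
   { b, d } are converted from double to single precision and the
   merge-even-words instruction produces the final element order
   directly.  Without VMRGEW the pairs are { a, b } and { c, d } and
   rs6000_expand_extract_even interleaves the converted words back
   into order.  */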
7142 /* Special case initializing vector short/char that are splats if we are on
7143 64-bit systems with direct move. */
7144 if (all_same && TARGET_DIRECT_MOVE_64BIT
7145 && (mode == V16QImode || mode == V8HImode))
7147 rtx op0 = XVECEXP (vals, 0, 0);
7148 rtx di_tmp = gen_reg_rtx (DImode);
7150 if (!REG_P (op0))
7151 op0 = force_reg (GET_MODE_INNER (mode), op0);
7153 if (mode == V16QImode)
7155 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
7156 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
7157 return;
7160 if (mode == V8HImode)
7162 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
7163 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
7164 return;
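/* Sketch of the splat above (assumed code sequence): the scalar is
   zero-extended into a GPR and the vsx_vspltb_di/vsx_vsplth_di
   patterns move it to a vector register and replicate it, roughly
   for V16QImode:

     mtvsrd vT,rT ; vspltb vT,vT,<elt>

   which avoids a round trip through the stack.  */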
7168 /* Store value to stack temp. Load vector element. Splat. However, splat
7169 of 64-bit items is not supported on Altivec. */
7170 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
7172 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7173 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
7174 XVECEXP (vals, 0, 0));
7175 x = gen_rtx_UNSPEC (VOIDmode,
7176 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7177 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7178 gen_rtvec (2,
7179 gen_rtx_SET (target, mem),
7180 x)));
7181 x = gen_rtx_VEC_SELECT (inner_mode, target,
7182 gen_rtx_PARALLEL (VOIDmode,
7183 gen_rtvec (1, const0_rtx)));
7184 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
7185 return;
7188 /* One field is non-constant. Load constant then overwrite
7189 varying field. */
7190 if (n_var == 1)
7192 rtx copy = copy_rtx (vals);
7194 /* Load constant part of vector, substitute neighboring value for
7195 varying element. */
7196 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
7197 rs6000_expand_vector_init (target, copy);
7199 /* Insert variable. */
7200 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
7201 return;
7204 /* Construct the vector in memory one field at a time
7205 and load the whole vector. */
7206 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7207 for (i = 0; i < n_elts; i++)
7208 emit_move_insn (adjust_address_nv (mem, inner_mode,
7209 i * GET_MODE_SIZE (inner_mode)),
7210 XVECEXP (vals, 0, i));
7211 emit_move_insn (target, mem);
7214 /* Set field ELT of TARGET to VAL. */
7216 void
7217 rs6000_expand_vector_set (rtx target, rtx val, int elt)
7219 machine_mode mode = GET_MODE (target);
7220 machine_mode inner_mode = GET_MODE_INNER (mode);
7221 rtx reg = gen_reg_rtx (mode);
7222 rtx mask, mem, x;
7223 int width = GET_MODE_SIZE (inner_mode);
7224 int i;
7226 val = force_reg (GET_MODE (val), val);
7228 if (VECTOR_MEM_VSX_P (mode))
7230 rtx insn = NULL_RTX;
7231 rtx elt_rtx = GEN_INT (elt);
7233 if (mode == V2DFmode)
7234 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7236 else if (mode == V2DImode)
7237 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7239 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
7241 if (mode == V4SImode)
7242 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7243 else if (mode == V8HImode)
7244 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7245 else if (mode == V16QImode)
7246 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7247 else if (mode == V4SFmode)
7248 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
7251 if (insn)
7253 emit_insn (insn);
7254 return;
7258 /* Simplify setting single element vectors like V1TImode. */
7259 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7261 emit_move_insn (target, gen_lowpart (mode, val));
7262 return;
7265 /* Load single variable value. */
7266 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7267 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7268 x = gen_rtx_UNSPEC (VOIDmode,
7269 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7270 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7271 gen_rtvec (2,
7272 gen_rtx_SET (reg, mem),
7273 x)));
7275 /* Linear sequence. */
7276 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7277 for (i = 0; i < 16; ++i)
7278 XVECEXP (mask, 0, i) = GEN_INT (i);
7280 /* Set permute mask to insert element into target. */
7281 for (i = 0; i < width; ++i)
7282 XVECEXP (mask, 0, elt*width + i)
7283 = GEN_INT (i + 0x10);
7284 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
7286 if (BYTES_BIG_ENDIAN)
7287 x = gen_rtx_UNSPEC (mode,
7288 gen_rtvec (3, target, reg,
7289 force_reg (V16QImode, x)),
7290 UNSPEC_VPERM);
7291 else
7293 if (TARGET_P9_VECTOR)
7294 x = gen_rtx_UNSPEC (mode,
7295 gen_rtvec (3, target, reg,
7296 force_reg (V16QImode, x)),
7297 UNSPEC_VPERMR);
7298 else
7300 /* Invert selector. We prefer to generate VNAND on P8 so
7301 that future fusion opportunities can kick in, but must
7302 generate VNOR elsewhere. */
7303 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7304 rtx iorx = (TARGET_P8_VECTOR
7305 ? gen_rtx_IOR (V16QImode, notx, notx)
7306 : gen_rtx_AND (V16QImode, notx, notx));
7307 rtx tmp = gen_reg_rtx (V16QImode);
7308 emit_insn (gen_rtx_SET (tmp, iorx));
7310 /* Permute with operands reversed and adjusted selector. */
7311 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7312 UNSPEC_VPERM);
7316 emit_insn (gen_rtx_SET (target, x));
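/* Worked example of the permute mask built above (illustrative): for a
   V4SImode target and ELT == 2 (width 4), the identity mask 0..15 has
   bytes 8..11 replaced by 0x10..0x13:

     { 0,1,2,3, 4,5,6,7, 16,17,18,19, 12,13,14,15 }

   Selector values of 16 and up index into the second vperm input, so
   element 2 of the result comes from the first word of REG and every
   other byte is copied from TARGET unchanged.  */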
7319 /* Extract field ELT from VEC into TARGET. */
7321 void
7322 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7324 machine_mode mode = GET_MODE (vec);
7325 machine_mode inner_mode = GET_MODE_INNER (mode);
7326 rtx mem;
7328 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7330 switch (mode)
7332 default:
7333 break;
7334 case E_V1TImode:
7335 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7336 emit_move_insn (target, gen_lowpart (TImode, vec));
7337 break;
7338 case E_V2DFmode:
7339 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7340 return;
7341 case E_V2DImode:
7342 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7343 return;
7344 case E_V4SFmode:
7345 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7346 return;
7347 case E_V16QImode:
7348 if (TARGET_DIRECT_MOVE_64BIT)
7350 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7351 return;
7353 else
7354 break;
7355 case E_V8HImode:
7356 if (TARGET_DIRECT_MOVE_64BIT)
7358 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7359 return;
7361 else
7362 break;
7363 case E_V4SImode:
7364 if (TARGET_DIRECT_MOVE_64BIT)
7366 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7367 return;
7369 break;
7372 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7373 && TARGET_DIRECT_MOVE_64BIT)
7375 if (GET_MODE (elt) != DImode)
7377 rtx tmp = gen_reg_rtx (DImode);
7378 convert_move (tmp, elt, 0);
7379 elt = tmp;
7381 else if (!REG_P (elt))
7382 elt = force_reg (DImode, elt);
7384 switch (mode)
7386 case E_V2DFmode:
7387 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7388 return;
7390 case E_V2DImode:
7391 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7392 return;
7394 case E_V4SFmode:
7395 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7396 return;
7398 case E_V4SImode:
7399 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7400 return;
7402 case E_V8HImode:
7403 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7404 return;
7406 case E_V16QImode:
7407 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7408 return;
7410 default:
7411 gcc_unreachable ();
7415 gcc_assert (CONST_INT_P (elt));
7417 /* Allocate mode-sized buffer. */
7418 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7420 emit_move_insn (mem, vec);
7422 /* Add offset to field within buffer matching vector element. */
7423 mem = adjust_address_nv (mem, inner_mode,
7424 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7426 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
7429 /* Helper function to return the register number of an RTX. */
7430 static inline int
7431 regno_or_subregno (rtx op)
7433 if (REG_P (op))
7434 return REGNO (op);
7435 else if (SUBREG_P (op))
7436 return subreg_regno (op);
7437 else
7438 gcc_unreachable ();
7441 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7442 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7443 temporary (BASE_TMP) to fix up the address.  Return the new memory address
7444 that is valid for reads or writes to a given register (SCALAR_REG). */
7446 static rtx
7447 rs6000_adjust_vec_address (rtx scalar_reg,
7448 rtx mem,
7449 rtx element,
7450 rtx base_tmp,
7451 machine_mode scalar_mode)
7453 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7454 rtx addr = XEXP (mem, 0);
7455 rtx element_offset;
7456 rtx new_addr;
7457 bool valid_addr_p;
7459 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7460 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7462 /* Calculate what we need to add to the address to get the element
7463 address. */
7464 if (CONST_INT_P (element))
7465 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7466 else
7468 int byte_shift = exact_log2 (scalar_size);
7469 gcc_assert (byte_shift >= 0);
7471 if (byte_shift == 0)
7472 element_offset = element;
7474 else
7476 if (TARGET_POWERPC64)
7477 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7478 else
7479 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7481 element_offset = base_tmp;
7485 /* Create the new address pointing to the element within the vector. If we
7486 are adding 0, we don't have to change the address. */
7487 if (element_offset == const0_rtx)
7488 new_addr = addr;
7490 /* A simple indirect address can be converted into a reg + offset
7491 address. */
7492 else if (REG_P (addr) || SUBREG_P (addr))
7493 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7495 /* Optimize D-FORM addresses with a constant offset and a constant element
7496 number, folding the element offset directly into the address. */
7497 else if (GET_CODE (addr) == PLUS)
7499 rtx op0 = XEXP (addr, 0);
7500 rtx op1 = XEXP (addr, 1);
7501 rtx insn;
7503 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7504 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7506 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7507 rtx offset_rtx = GEN_INT (offset);
7509 if (IN_RANGE (offset, -32768, 32767)
7510 && (scalar_size < 8 || (offset & 0x3) == 0))
7511 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7512 else
7514 emit_move_insn (base_tmp, offset_rtx);
7515 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7518 else
7520 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7521 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7523 /* Note, ADDI requires the register being added to be a base
7524 register. If the register was R0, load it up into the temporary
7525 and do the add. */
7526 if (op1_reg_p
7527 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7529 insn = gen_add3_insn (base_tmp, op1, element_offset);
7530 gcc_assert (insn != NULL_RTX);
7531 emit_insn (insn);
7534 else if (ele_reg_p
7535 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7537 insn = gen_add3_insn (base_tmp, element_offset, op1);
7538 gcc_assert (insn != NULL_RTX);
7539 emit_insn (insn);
7542 else
7544 emit_move_insn (base_tmp, op1);
7545 emit_insn (gen_add2_insn (base_tmp, element_offset));
7548 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7552 else
7554 emit_move_insn (base_tmp, addr);
7555 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7558 /* If we have a PLUS, we need to see whether the particular register class
7559 allows for D-FORM or X-FORM addressing. */
7560 if (GET_CODE (new_addr) == PLUS)
7562 rtx op1 = XEXP (new_addr, 1);
7563 addr_mask_type addr_mask;
7564 int scalar_regno = regno_or_subregno (scalar_reg);
7566 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7567 if (INT_REGNO_P (scalar_regno))
7568 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7570 else if (FP_REGNO_P (scalar_regno))
7571 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7573 else if (ALTIVEC_REGNO_P (scalar_regno))
7574 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7576 else
7577 gcc_unreachable ();
7579 if (REG_P (op1) || SUBREG_P (op1))
7580 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7581 else
7582 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7585 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7586 valid_addr_p = true;
7588 else
7589 valid_addr_p = false;
7591 if (!valid_addr_p)
7593 emit_move_insn (base_tmp, new_addr);
7594 new_addr = base_tmp;
7597 return change_address (mem, scalar_mode, new_addr);
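/* Worked example (illustrative): extracting element 3 of a V4SImode
   vector stored at r9+16 gives element_offset = 12, and the constant
   D-form case above folds it into the displacement:

     (mem:V4SI (plus (reg 9) (const_int 16)))
       -> (mem:SI (plus (reg 9) (const_int 28)))

   A variable element instead shifts ELEMENT left by log2(4) = 2 into
   BASE_TMP and the result is an X-form reg+reg address.  */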
7600 /* Split a variable vec_extract operation into the component instructions. */
7602 void
7603 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7604 rtx tmp_altivec)
7606 machine_mode mode = GET_MODE (src);
7607 machine_mode scalar_mode = GET_MODE (dest);
7608 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7609 int byte_shift = exact_log2 (scalar_size);
7611 gcc_assert (byte_shift >= 0);
7613 /* If we are given a memory address, optimize to load just the element. We
7614 don't have to adjust the vector element number on little endian
7615 systems. */
7616 if (MEM_P (src))
7618 gcc_assert (REG_P (tmp_gpr));
7619 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7620 tmp_gpr, scalar_mode));
7621 return;
7624 else if (REG_P (src) || SUBREG_P (src))
7626 int bit_shift = byte_shift + 3;
7627 rtx element2;
7628 int dest_regno = regno_or_subregno (dest);
7629 int src_regno = regno_or_subregno (src);
7630 int element_regno = regno_or_subregno (element);
7632 gcc_assert (REG_P (tmp_gpr));
7634 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7635 a general purpose register. */
7636 if (TARGET_P9_VECTOR
7637 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7638 && INT_REGNO_P (dest_regno)
7639 && ALTIVEC_REGNO_P (src_regno)
7640 && INT_REGNO_P (element_regno))
7642 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7643 rtx element_si = gen_rtx_REG (SImode, element_regno);
7645 if (mode == V16QImode)
7646 emit_insn (VECTOR_ELT_ORDER_BIG
7647 ? gen_vextublx (dest_si, element_si, src)
7648 : gen_vextubrx (dest_si, element_si, src));
7650 else if (mode == V8HImode)
7652 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7653 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7654 emit_insn (VECTOR_ELT_ORDER_BIG
7655 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7656 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7660 else
7662 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7663 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7664 emit_insn (VECTOR_ELT_ORDER_BIG
7665 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7666 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7669 return;
7673 gcc_assert (REG_P (tmp_altivec));
7675 /* For little endian, adjust element ordering.  For V2DI/V2DF, we can use
7676 an XOR, otherwise we need to subtract.  The shift amount is chosen so
7677 that VSLO will shift the element into the upper position (adding 3
7678 converts a byte shift into a bit shift). */
7679 if (scalar_size == 8)
7681 if (!VECTOR_ELT_ORDER_BIG)
7683 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7684 element2 = tmp_gpr;
7686 else
7687 element2 = element;
7689 /* Generate RLDIC directly to shift left 6 bits and keep just the
7690 one relevant bit (a shift count of 0 or 64). */
7691 emit_insn (gen_rtx_SET (tmp_gpr,
7692 gen_rtx_AND (DImode,
7693 gen_rtx_ASHIFT (DImode,
7694 element2,
7695 GEN_INT (6)),
7696 GEN_INT (64))));
7698 else
7700 if (!VECTOR_ELT_ORDER_BIG)
7702 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7704 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7705 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7706 element2 = tmp_gpr;
7708 else
7709 element2 = element;
7711 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7714 /* Get the value into the lower byte of the Altivec register where VSLO
7715 expects it. */
7716 if (TARGET_P9_VECTOR)
7717 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7718 else if (can_create_pseudo_p ())
7719 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7720 else
7722 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7723 emit_move_insn (tmp_di, tmp_gpr);
7724 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7727 /* Do the VSLO to get the value into the final location. */
7728 switch (mode)
7730 case E_V2DFmode:
7731 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7732 return;
7734 case E_V2DImode:
7735 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7736 return;
7738 case E_V4SFmode:
7740 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7741 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7742 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7743 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7744 tmp_altivec));
7746 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7747 return;
7750 case E_V4SImode:
7751 case E_V8HImode:
7752 case E_V16QImode:
7754 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7755 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7756 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7757 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7758 tmp_altivec));
7759 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7760 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7761 GEN_INT (64 - (8 * scalar_size))));
7762 return;
7765 default:
7766 gcc_unreachable ();
7769 return;
7771 else
7772 gcc_unreachable ();
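/* Example of the little-endian remapping above (illustrative): for
   V4SImode the element numbers run in the opposite direction, so the
   code computes 3 - (element & 3) before converting the result to a
   byte (and then bit) shift for VSLO.  For 8-byte scalars the same
   remapping is just element ^ 1, which is why a single XOR suffices
   in the scalar_size == 8 case.  */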
7775 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7776 two SImode values. */
7778 static void
7779 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
7781 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
7783 if (CONST_INT_P (si1) && CONST_INT_P (si2))
7785 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
7786 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
7788 emit_move_insn (dest, GEN_INT (const1 | const2));
7789 return;
7792 /* Put si1 into upper 32-bits of dest. */
7793 if (CONST_INT_P (si1))
7794 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
7795 else
7797 /* Generate RLDIC. */
7798 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
7799 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
7800 rtx mask_rtx = GEN_INT (mask_32bit << 32);
7801 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
7802 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
7803 emit_insn (gen_rtx_SET (dest, and_rtx));
7806 /* Put si2 into the temporary. */
7807 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
7808 if (CONST_INT_P (si2))
7809 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
7810 else
7811 emit_insn (gen_zero_extendsidi2 (tmp, si2));
7813 /* Combine the two parts. */
7814 emit_insn (gen_iordi3 (dest, dest, tmp));
7815 return;
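/* Worked example (illustrative): with si1 = 0x11112222 and
   si2 = 0x33334444 both constant, the fast path above emits a single
   move of 0x1111222233334444.  With si1 in a register, the RLDIC form
   computes (si1 << 32) & 0xffffffff00000000 and the zero-extended si2
   is then OR'ed in to complete the doubleword.  */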
7818 /* Split a V4SI initialization. */
7820 void
7821 rs6000_split_v4si_init (rtx operands[])
7823 rtx dest = operands[0];
7825 /* Destination is a GPR, build up the two DImode parts in place. */
7826 if (REG_P (dest) || SUBREG_P (dest))
7828 int d_regno = regno_or_subregno (dest);
7829 rtx scalar1 = operands[1];
7830 rtx scalar2 = operands[2];
7831 rtx scalar3 = operands[3];
7832 rtx scalar4 = operands[4];
7833 rtx tmp1 = operands[5];
7834 rtx tmp2 = operands[6];
7836 /* Even though we only need one temporary (plus the destination, which
7837 has an early clobber constraint), try to use two temporaries, one for
7838 each double word created.  That way the 2nd insn scheduling pass can
7839 rearrange things so the two parts are done in parallel. */
7840 if (BYTES_BIG_ENDIAN)
7842 rtx di_lo = gen_rtx_REG (DImode, d_regno);
7843 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
7844 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
7845 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
7847 else
7849 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
7850 rtx di_hi = gen_rtx_REG (DImode, d_regno);
7851 gcc_assert (!VECTOR_ELT_ORDER_BIG);
7852 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
7853 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
7855 return;
7858 else
7859 gcc_unreachable ();
7862 /* Return alignment of TYPE.  Existing alignment is ALIGN.  HOW
7863 selects whether the alignment is ABI mandated, optional, or
7864 both ABI and optional alignment. */
7866 unsigned int
7867 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7869 if (how != align_opt)
7871 if (TREE_CODE (type) == VECTOR_TYPE)
7873 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type)))
7875 if (align < 64)
7876 align = 64;
7878 else if (align < 128)
7879 align = 128;
7883 if (how != align_abi)
7885 if (TREE_CODE (type) == ARRAY_TYPE
7886 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7888 if (align < BITS_PER_WORD)
7889 align = BITS_PER_WORD;
7893 return align;
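/* For example (illustrative): a char[32] array picks up BITS_PER_WORD
   alignment from the optional rule above, while a vector type such as
   V4SI is raised to 128 bits (64 for paired float) whenever ABI
   alignment is requested.  */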
7896 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7897 instructions simply ignore the low bits; VSX memory instructions
7898 are aligned to 4 or 8 bytes. */
7900 static bool
7901 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7903 return (STRICT_ALIGNMENT
7904 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7905 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7906 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7907 && (int) align < VECTOR_ALIGN (mode)))));
7910 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7912 bool
7913 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7915 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7917 if (computed != 128)
7919 static bool warned;
7920 if (!warned && warn_psabi)
7922 warned = true;
7923 inform (input_location,
7924 "the layout of aggregates containing vectors with"
7925 " %d-byte alignment has changed in GCC 5",
7926 computed / BITS_PER_UNIT);
7929 /* In current GCC there is no special case. */
7930 return false;
7933 return false;
7936 /* AIX increases natural record alignment to doubleword if the first
7937 field is an FP double while the FP fields remain word aligned. */
7939 unsigned int
7940 rs6000_special_round_type_align (tree type, unsigned int computed,
7941 unsigned int specified)
7943 unsigned int align = MAX (computed, specified);
7944 tree field = TYPE_FIELDS (type);
7946 /* Skip all non-field decls.  */
7947 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7948 field = DECL_CHAIN (field);
7950 if (field != NULL && field != type)
7952 type = TREE_TYPE (field);
7953 while (TREE_CODE (type) == ARRAY_TYPE)
7954 type = TREE_TYPE (type);
7956 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7957 align = MAX (align, 64);
7960 return align;
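/* Example (illustrative): on AIX, struct { double d; int i; } is
   rounded up to doubleword alignment because its first field has
   DFmode, even though double fields after the first remain word
   aligned.  */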
7963 /* Darwin increases record alignment to the natural alignment of
7964 the first field. */
7966 unsigned int
7967 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7968 unsigned int specified)
7970 unsigned int align = MAX (computed, specified);
7972 if (TYPE_PACKED (type))
7973 return align;
7975 /* Find the first field, looking down into aggregates. */
7976 do {
7977 tree field = TYPE_FIELDS (type);
7978 /* Skip all non-field decls.  */
7979 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7980 field = DECL_CHAIN (field);
7981 if (! field)
7982 break;
7983 /* A packed field does not contribute any extra alignment. */
7984 if (DECL_PACKED (field))
7985 return align;
7986 type = TREE_TYPE (field);
7987 while (TREE_CODE (type) == ARRAY_TYPE)
7988 type = TREE_TYPE (type);
7989 } while (AGGREGATE_TYPE_P (type));
7991 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7992 align = MAX (align, TYPE_ALIGN (type));
7994 return align;
7997 /* Return 1 for an operand in small memory on V.4/eabi. */
7999 int
8000 small_data_operand (rtx op ATTRIBUTE_UNUSED,
8001 machine_mode mode ATTRIBUTE_UNUSED)
8003 #if TARGET_ELF
8004 rtx sym_ref;
8006 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
8007 return 0;
8009 if (DEFAULT_ABI != ABI_V4)
8010 return 0;
8012 if (GET_CODE (op) == SYMBOL_REF)
8013 sym_ref = op;
8015 else if (GET_CODE (op) != CONST
8016 || GET_CODE (XEXP (op, 0)) != PLUS
8017 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
8018 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
8019 return 0;
8021 else
8023 rtx sum = XEXP (op, 0);
8024 HOST_WIDE_INT summand;
8026 /* We have to be careful here, because it is the referenced address
8027 that must be 32k from _SDA_BASE_, not just the symbol. */
8028 summand = INTVAL (XEXP (sum, 1));
8029 if (summand < 0 || summand > g_switch_value)
8030 return 0;
8032 sym_ref = XEXP (sum, 0);
8035 return SYMBOL_REF_SMALL_P (sym_ref);
8036 #else
8037 return 0;
8038 #endif
8041 /* Return true if either operand is a general purpose register. */
8043 bool
8044 gpr_or_gpr_p (rtx op0, rtx op1)
8046 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
8047 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
8050 /* Return true if this is a move direct operation between GPR registers and
8051 floating point/VSX registers. */
8053 bool
8054 direct_move_p (rtx op0, rtx op1)
8056 int regno0, regno1;
8058 if (!REG_P (op0) || !REG_P (op1))
8059 return false;
8061 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
8062 return false;
8064 regno0 = REGNO (op0);
8065 regno1 = REGNO (op1);
8066 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
8067 return false;
8069 if (INT_REGNO_P (regno0))
8070 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
8072 else if (INT_REGNO_P (regno1))
8074 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
8075 return true;
8077 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
8078 return true;
8081 return false;
8084 /* Return true if the OFFSET is valid for the quad address instructions that
8085 use d-form (register + offset) addressing. */
8087 static inline bool
8088 quad_address_offset_p (HOST_WIDE_INT offset)
8090 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
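/* For example (illustrative): offset 32752 (0x7ff0) is accepted, while
   32760 fails the low-nibble test and 32768 is outside the signed
   16-bit range; quad displacements must be multiples of 16 within
   [-32768, 32767].  */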
8093 /* Return true if ADDR is an acceptable address for a quad memory
8094 operation of mode MODE (either LQ/STQ for general purpose registers, or
8095 LXV/STXV for vector registers under ISA 3.0).  STRICT is the usual
8096 strictness check for the base register, i.e. whether only hard
8097 registers are acceptable as base registers. */
8099 bool
8100 quad_address_p (rtx addr, machine_mode mode, bool strict)
8102 rtx op0, op1;
8104 if (GET_MODE_SIZE (mode) != 16)
8105 return false;
8107 if (legitimate_indirect_address_p (addr, strict))
8108 return true;
8110 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8111 return false;
8113 if (GET_CODE (addr) != PLUS)
8114 return false;
8116 op0 = XEXP (addr, 0);
8117 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8118 return false;
8120 op1 = XEXP (addr, 1);
8121 if (!CONST_INT_P (op1))
8122 return false;
8124 return quad_address_offset_p (INTVAL (op1));
8127 /* Return true if this is a load or store quad operation. This function does
8128 not handle the atomic quad memory instructions. */
8130 bool
8131 quad_load_store_p (rtx op0, rtx op1)
8133 bool ret;
8135 if (!TARGET_QUAD_MEMORY)
8136 ret = false;
8138 else if (REG_P (op0) && MEM_P (op1))
8139 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
8140 && quad_memory_operand (op1, GET_MODE (op1))
8141 && !reg_overlap_mentioned_p (op0, op1));
8143 else if (MEM_P (op0) && REG_P (op1))
8144 ret = (quad_memory_operand (op0, GET_MODE (op0))
8145 && quad_int_reg_operand (op1, GET_MODE (op1)));
8147 else
8148 ret = false;
8150 if (TARGET_DEBUG_ADDR)
8152 fprintf (stderr, "\n========== quad_load_store, return %s\n",
8153 ret ? "true" : "false");
8154 debug_rtx (gen_rtx_SET (op0, op1));
8157 return ret;
8160 /* Given an address, return a constant offset term if one exists. */
8162 static rtx
8163 address_offset (rtx op)
8165 if (GET_CODE (op) == PRE_INC
8166 || GET_CODE (op) == PRE_DEC)
8167 op = XEXP (op, 0);
8168 else if (GET_CODE (op) == PRE_MODIFY
8169 || GET_CODE (op) == LO_SUM)
8170 op = XEXP (op, 1);
8172 if (GET_CODE (op) == CONST)
8173 op = XEXP (op, 0);
8175 if (GET_CODE (op) == PLUS)
8176 op = XEXP (op, 1);
8178 if (CONST_INT_P (op))
8179 return op;
8181 return NULL_RTX;
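/* Examples (illustrative): (plus (reg) (const_int 8)) yields 8,
   (lo_sum (reg) (const (plus (symbol_ref) (const_int 4)))) yields 4,
   and a plain (reg) yields NULL_RTX since no constant term exists.  */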
8184 /* Return true if the MEM operand is a memory operand suitable for use
8185 with a (full width, possibly multiple) gpr load/store. On
8186 powerpc64 this means the offset must be divisible by 4.
8187 Implements 'Y' constraint.
8189 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8190 a constraint function we know the operand has satisfied a suitable
8191 memory predicate. Also accept some odd rtl generated by reload
8192 (see rs6000_legitimize_reload_address for various forms). It is
8193 important that reload rtl be accepted by appropriate constraints
8194 but not by the operand predicate.
8196 Offsetting a lo_sum should not be allowed, except where we know by
8197 alignment that a 32k boundary is not crossed, but see the ???
8198 comment in rs6000_legitimize_reload_address. Note that by
8199 "offsetting" here we mean a further offset to access parts of the
8200 MEM. It's fine to have a lo_sum where the inner address is offset
8201 from a sym, since the same sym+offset will appear in the high part
8202 of the address calculation. */
8204 bool
8205 mem_operand_gpr (rtx op, machine_mode mode)
8207 unsigned HOST_WIDE_INT offset;
8208 int extra;
8209 rtx addr = XEXP (op, 0);
8211 op = address_offset (addr);
8212 if (op == NULL_RTX)
8213 return true;
8215 offset = INTVAL (op);
8216 if (TARGET_POWERPC64 && (offset & 3) != 0)
8217 return false;
8219 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8220 if (extra < 0)
8221 extra = 0;
8223 if (GET_CODE (addr) == LO_SUM)
8224 /* For lo_sum addresses, we must allow any offset except one that
8225 causes a wrap, so test only the low 16 bits. */
8226 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8228 return offset + 0x8000 < 0x10000u - extra;
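/* Worked example (illustrative): a 16-byte access on powerpc64 has
   extra = 16 - 8 = 8, so the largest accepted offset (also subject to
   the divisible-by-4 rule above) is 0x7ff4; the second doubleword at
   offset + 8 then still has a valid 16-bit displacement.  */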
8231 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8232 enforce an offset divisible by 4 even for 32-bit. */
8234 bool
8235 mem_operand_ds_form (rtx op, machine_mode mode)
8237 unsigned HOST_WIDE_INT offset;
8238 int extra;
8239 rtx addr = XEXP (op, 0);
8241 if (!offsettable_address_p (false, mode, addr))
8242 return false;
8244 op = address_offset (addr);
8245 if (op == NULL_RTX)
8246 return true;
8248 offset = INTVAL (op);
8249 if ((offset & 3) != 0)
8250 return false;
8252 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8253 if (extra < 0)
8254 extra = 0;
8256 if (GET_CODE (addr) == LO_SUM)
8257 /* For lo_sum addresses, we must allow any offset except one that
8258 causes a wrap, so test only the low 16 bits. */
8259 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8261 return offset + 0x8000 < 0x10000u - extra;
8264 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8266 static bool
8267 reg_offset_addressing_ok_p (machine_mode mode)
8269 switch (mode)
8271 case E_V16QImode:
8272 case E_V8HImode:
8273 case E_V4SFmode:
8274 case E_V4SImode:
8275 case E_V2DFmode:
8276 case E_V2DImode:
8277 case E_V1TImode:
8278 case E_TImode:
8279 case E_TFmode:
8280 case E_KFmode:
8281 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8282 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8283 a vector mode, if we want to use the VSX registers to move it around,
8284 we need to restrict ourselves to reg+reg addressing. Similarly for
8285 IEEE 128-bit floating point that is passed in a single vector
8286 register. */
8287 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8288 return mode_supports_vsx_dform_quad (mode);
8289 break;
8291 case E_V2SImode:
8292 case E_V2SFmode:
8293 /* Paired vector modes. Only reg+reg addressing is valid. */
8294 if (TARGET_PAIRED_FLOAT)
8295 return false;
8296 break;
8298 case E_SDmode:
8299 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8300 addressing for the LFIWZX and STFIWX instructions. */
8301 if (TARGET_NO_SDMODE_STACK)
8302 return false;
8303 break;
8305 default:
8306 break;
8309 return true;
8312 static bool
8313 virtual_stack_registers_memory_p (rtx op)
8315 int regnum;
8317 if (GET_CODE (op) == REG)
8318 regnum = REGNO (op);
8320 else if (GET_CODE (op) == PLUS
8321 && GET_CODE (XEXP (op, 0)) == REG
8322 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8323 regnum = REGNO (XEXP (op, 0));
8325 else
8326 return false;
8328 return (regnum >= FIRST_VIRTUAL_REGISTER
8329 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8332 /* Return true if a MODE-sized memory access to OP plus OFFSET
8333 is known not to straddle a 32k boundary.  This function is used
8334 to determine whether -mcmodel=medium code can use TOC pointer
8335 relative addressing for OP. This means the alignment of the TOC
8336 pointer must also be taken into account, and unfortunately that is
8337 only 8 bytes. */
8339 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8340 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8341 #endif
8343 static bool
8344 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8345 machine_mode mode)
8347 tree decl;
8348 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8350 if (GET_CODE (op) != SYMBOL_REF)
8351 return false;
8353 /* ISA 3.0 vector d-form addressing is restricted, don't allow
8354 SYMBOL_REF. */
8355 if (mode_supports_vsx_dform_quad (mode))
8356 return false;
8358 dsize = GET_MODE_SIZE (mode);
8359 decl = SYMBOL_REF_DECL (op);
8360 if (!decl)
8362 if (dsize == 0)
8363 return false;
8365 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8366 replacing memory addresses with an anchor plus offset. We
8367 could find the decl by rummaging around in the block->objects
8368 VEC for the given offset but that seems like too much work. */
8369 dalign = BITS_PER_UNIT;
8370 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8371 && SYMBOL_REF_ANCHOR_P (op)
8372 && SYMBOL_REF_BLOCK (op) != NULL)
8374 struct object_block *block = SYMBOL_REF_BLOCK (op);
8376 dalign = block->alignment;
8377 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8379 else if (CONSTANT_POOL_ADDRESS_P (op))
8381 /* It would be nice to have get_pool_align().. */
8382 machine_mode cmode = get_pool_mode (op);
8384 dalign = GET_MODE_ALIGNMENT (cmode);
8387 else if (DECL_P (decl))
8389 dalign = DECL_ALIGN (decl);
8391 if (dsize == 0)
8393 /* Allow BLKmode when the entire object is known to not
8394 cross a 32k boundary. */
8395 if (!DECL_SIZE_UNIT (decl))
8396 return false;
8398 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8399 return false;
8401 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8402 if (dsize > 32768)
8403 return false;
8405 dalign /= BITS_PER_UNIT;
8406 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8407 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8408 return dalign >= dsize;
8411 else
8412 gcc_unreachable ();
8414 /* Find how many bits of the alignment we know for this access. */
8415 dalign /= BITS_PER_UNIT;
8416 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8417 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8418 mask = dalign - 1;
8419 lsb = offset & -offset;
8420 mask &= lsb - 1;
8421 dalign = mask + 1;
8423 return dalign >= dsize;
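/* Worked example (illustrative): even a 16-byte-aligned decl is capped
   at POWERPC64_TOC_POINTER_ALIGNMENT (8 bytes), and with offset 40 the
   lowest set bit (40 & -40 == 8) limits the known alignment of
   op + offset to 8 bytes, so a 16-byte access is rejected while a
   4- or 8-byte access is fine.  */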
8426 static bool
8427 constant_pool_expr_p (rtx op)
8429 rtx base, offset;
8431 split_const (op, &base, &offset);
8432 return (GET_CODE (base) == SYMBOL_REF
8433 && CONSTANT_POOL_ADDRESS_P (base)
8434 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8437 /* These are only used to pass through from print_operand/print_operand_address
8438 to rs6000_output_addr_const_extra over the intervening function
8439 output_addr_const, which is not target code. */
8440 static const_rtx tocrel_base_oac, tocrel_offset_oac;
8442 /* Return true if OP is a toc pointer relative address (the output
8443 of create_TOC_reference). If STRICT, do not match non-split
8444 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8445 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8446 TOCREL_OFFSET_RET respectively. */
8448 bool
8449 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
8450 const_rtx *tocrel_offset_ret)
8452 if (!TARGET_TOC)
8453 return false;
8455 if (TARGET_CMODEL != CMODEL_SMALL)
8457 /* When strict, ensure we have everything tidy. */
8458 if (strict
8459 && !(GET_CODE (op) == LO_SUM
8460 && REG_P (XEXP (op, 0))
8461 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8462 return false;
8464 /* When not strict, allow non-split TOC addresses and also allow
8465 (lo_sum (high ..)) TOC addresses created during reload. */
8466 if (GET_CODE (op) == LO_SUM)
8467 op = XEXP (op, 1);
8470 const_rtx tocrel_base = op;
8471 const_rtx tocrel_offset = const0_rtx;
8473 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8475 tocrel_base = XEXP (op, 0);
8476 tocrel_offset = XEXP (op, 1);
8479 if (tocrel_base_ret)
8480 *tocrel_base_ret = tocrel_base;
8481 if (tocrel_offset_ret)
8482 *tocrel_offset_ret = tocrel_offset;
8484 return (GET_CODE (tocrel_base) == UNSPEC
8485 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
8488 /* Return true if X is a constant pool address, and also for cmodel=medium
8489 if X is a toc-relative address known to be offsettable within MODE. */
8491 bool
8492 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8493 bool strict)
8495 const_rtx tocrel_base, tocrel_offset;
8496 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
8497 && (TARGET_CMODEL != CMODEL_MEDIUM
8498 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8499 || mode == QImode
8500 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8501 INTVAL (tocrel_offset), mode)));
8504 static bool
8505 legitimate_small_data_p (machine_mode mode, rtx x)
8507 return (DEFAULT_ABI == ABI_V4
8508 && !flag_pic && !TARGET_TOC
8509 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8510 && small_data_operand (x, mode));
8513 bool
8514 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8515 bool strict, bool worst_case)
8517 unsigned HOST_WIDE_INT offset;
8518 unsigned int extra;
8520 if (GET_CODE (x) != PLUS)
8521 return false;
8522 if (!REG_P (XEXP (x, 0)))
8523 return false;
8524 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8525 return false;
8526 if (mode_supports_vsx_dform_quad (mode))
8527 return quad_address_p (x, mode, strict);
8528 if (!reg_offset_addressing_ok_p (mode))
8529 return virtual_stack_registers_memory_p (x);
8530 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8531 return true;
8532 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8533 return false;
8535 offset = INTVAL (XEXP (x, 1));
8536 extra = 0;
8537 switch (mode)
8539 case E_V2SImode:
8540 case E_V2SFmode:
8541 /* Paired single modes: offset addressing isn't valid. */
8542 return false;
8544 case E_DFmode:
8545 case E_DDmode:
8546 case E_DImode:
8547 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8548 addressing. */
8549 if (VECTOR_MEM_VSX_P (mode))
8550 return false;
8552 if (!worst_case)
8553 break;
8554 if (!TARGET_POWERPC64)
8555 extra = 4;
8556 else if (offset & 3)
8557 return false;
8558 break;
8560 case E_TFmode:
8561 case E_IFmode:
8562 case E_KFmode:
8563 case E_TDmode:
8564 case E_TImode:
8565 case E_PTImode:
8566 extra = 8;
8567 if (!worst_case)
8568 break;
8569 if (!TARGET_POWERPC64)
8570 extra = 12;
8571 else if (offset & 3)
8572 return false;
8573 break;
8575 default:
8576 break;
8579 offset += 0x8000;
8580 return offset < 0x10000 - extra;
8583 bool
8584 legitimate_indexed_address_p (rtx x, int strict)
8586 rtx op0, op1;
8588 if (GET_CODE (x) != PLUS)
8589 return false;
8591 op0 = XEXP (x, 0);
8592 op1 = XEXP (x, 1);
8594 return (REG_P (op0) && REG_P (op1)
8595 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8596 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8597 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8598 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8601 bool
8602 avoiding_indexed_address_p (machine_mode mode)
8604 /* Avoid indexed addressing for modes that have non-indexed
8605 load/store instruction forms. */
8606 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8609 bool
8610 legitimate_indirect_address_p (rtx x, int strict)
8612 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8615 bool
8616 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8618 if (!TARGET_MACHO || !flag_pic
8619 || mode != SImode || GET_CODE (x) != MEM)
8620 return false;
8621 x = XEXP (x, 0);
8623 if (GET_CODE (x) != LO_SUM)
8624 return false;
8625 if (GET_CODE (XEXP (x, 0)) != REG)
8626 return false;
8627 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8628 return false;
8629 x = XEXP (x, 1);
8631 return CONSTANT_P (x);
8634 static bool
8635 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8637 if (GET_CODE (x) != LO_SUM)
8638 return false;
8639 if (GET_CODE (XEXP (x, 0)) != REG)
8640 return false;
8641 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8642 return false;
8643 /* Quad word addresses are restricted, and we can't use LO_SUM. */
8644 if (mode_supports_vsx_dform_quad (mode))
8645 return false;
8646 x = XEXP (x, 1);
8648 if (TARGET_ELF || TARGET_MACHO)
8650 bool large_toc_ok;
8652 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8653 return false;
8654 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8655 push_reload from the reload pass code.  LEGITIMIZE_RELOAD_ADDRESS
8656 recognizes some LO_SUM addresses as valid although this
8657 function says the opposite.  In most cases LRA can generate
8658 correct code for address reloads through various transformations;
8659 it is only some LO_SUM cases that it cannot manage.  So we add
8660 code analogous to that in rs6000_legitimize_reload_address for
8661 LO_SUM here, saying that some addresses are still valid. */
8662 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8663 && small_toc_ref (x, VOIDmode));
8664 if (TARGET_TOC && ! large_toc_ok)
8665 return false;
8666 if (GET_MODE_NUNITS (mode) != 1)
8667 return false;
8668 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8669 && !(/* ??? Assume floating point reg based on mode? */
8670 TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
8671 && (mode == DFmode || mode == DDmode)))
8672 return false;
8674 return CONSTANT_P (x) || large_toc_ok;
8677 return false;
8681 /* Try machine-dependent ways of modifying an illegitimate address
8682 to be legitimate. If we find one, return the new, valid address.
8683 This is used from only one place: `memory_address' in explow.c.
8685 OLDX is the address as it was before break_out_memory_refs was
8686 called. In some cases it is useful to look at this to decide what
8687 needs to be done.
8689 It is always safe for this function to do nothing. It exists to
8690 recognize opportunities to optimize the output.
8692 On RS/6000, first check for the sum of a register with a constant
8693 integer that is out of range. If so, generate code to add the
8694 constant with the low-order 16 bits masked to the register and force
8695 this result into another register (this can be done with `cau').
8696 Then generate an address of REG+(CONST&0xffff), allowing for the
8697 possibility of bit 16 being a one.
8699 Then check for the sum of a register and something not constant, try to
8700 load the other things into a register and return the sum. */
8702 static rtx
8703 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8704 machine_mode mode)
8706 unsigned int extra;
8708 if (!reg_offset_addressing_ok_p (mode)
8709 || mode_supports_vsx_dform_quad (mode))
8711 if (virtual_stack_registers_memory_p (x))
8712 return x;
8714 /* In theory we should not be seeing addresses of the form reg+0,
8715 but just in case it is generated, optimize it away. */
8716 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8717 return force_reg (Pmode, XEXP (x, 0));
8719 /* For TImode with load/store quad, restrict addresses to just a single
8720 pointer, so it works with both GPRs and VSX registers. */
8721 /* Make sure both operands are registers. */
8722 else if (GET_CODE (x) == PLUS
8723 && (mode != TImode || !TARGET_VSX))
8724 return gen_rtx_PLUS (Pmode,
8725 force_reg (Pmode, XEXP (x, 0)),
8726 force_reg (Pmode, XEXP (x, 1)));
8727 else
8728 return force_reg (Pmode, x);
8730 if (GET_CODE (x) == SYMBOL_REF)
8732 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8733 if (model != 0)
8734 return rs6000_legitimize_tls_address (x, model);
8737 extra = 0;
8738 switch (mode)
8740 case E_TFmode:
8741 case E_TDmode:
8742 case E_TImode:
8743 case E_PTImode:
8744 case E_IFmode:
8745 case E_KFmode:
8746 /* As in legitimate_offset_address_p we do not assume
8747 worst-case. The mode here is just a hint as to the registers
8748 used. A TImode is usually in gprs, but may actually be in
8749 fprs. Leave worst-case scenario for reload to handle via
8750 insn constraints. PTImode is only GPRs. */
8751 extra = 8;
8752 break;
8753 default:
8754 break;
8757 if (GET_CODE (x) == PLUS
8758 && GET_CODE (XEXP (x, 0)) == REG
8759 && GET_CODE (XEXP (x, 1)) == CONST_INT
8760 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8761 >= 0x10000 - extra)
8762 && !PAIRED_VECTOR_MODE (mode))
8764 HOST_WIDE_INT high_int, low_int;
8765 rtx sum;
8766 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8767 if (low_int >= 0x8000 - extra)
8768 low_int = 0;
8769 high_int = INTVAL (XEXP (x, 1)) - low_int;
8770 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8771 GEN_INT (high_int)), 0);
8772 return plus_constant (Pmode, sum, low_int);
8774 else if (GET_CODE (x) == PLUS
8775 && GET_CODE (XEXP (x, 0)) == REG
8776 && GET_CODE (XEXP (x, 1)) != CONST_INT
8777 && GET_MODE_NUNITS (mode) == 1
8778 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8779 || (/* ??? Assume floating point reg based on mode? */
8780 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8781 && (mode == DFmode || mode == DDmode)))
8782 && !avoiding_indexed_address_p (mode))
8784 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8785 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8787 else if (PAIRED_VECTOR_MODE (mode))
8789 if (mode == DImode)
8790 return x;
8791 /* We accept [reg + reg]. */
8793 if (GET_CODE (x) == PLUS)
8795 rtx op1 = XEXP (x, 0);
8796 rtx op2 = XEXP (x, 1);
8797 rtx y;
8799 op1 = force_reg (Pmode, op1);
8800 op2 = force_reg (Pmode, op2);
8802 /* We can't always do [reg + reg] for these, because [reg +
8803 reg + offset] is not a legitimate addressing mode. */
8804 y = gen_rtx_PLUS (Pmode, op1, op2);
8806 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
8807 return force_reg (Pmode, y);
8808 else
8809 return y;
8812 return force_reg (Pmode, x);
8814 else if ((TARGET_ELF
8815 #if TARGET_MACHO
8816 || !MACHO_DYNAMIC_NO_PIC_P
8817 #endif
8819 && TARGET_32BIT
8820 && TARGET_NO_TOC
8821 && ! flag_pic
8822 && GET_CODE (x) != CONST_INT
8823 && GET_CODE (x) != CONST_WIDE_INT
8824 && GET_CODE (x) != CONST_DOUBLE
8825 && CONSTANT_P (x)
8826 && GET_MODE_NUNITS (mode) == 1
8827 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8828 || (/* ??? Assume floating point reg based on mode? */
8829 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8830 && (mode == DFmode || mode == DDmode))))
8832 rtx reg = gen_reg_rtx (Pmode);
8833 if (TARGET_ELF)
8834 emit_insn (gen_elf_high (reg, x));
8835 else
8836 emit_insn (gen_macho_high (reg, x));
8837 return gen_rtx_LO_SUM (Pmode, reg, x);
8839 else if (TARGET_TOC
8840 && GET_CODE (x) == SYMBOL_REF
8841 && constant_pool_expr_p (x)
8842 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8843 return create_TOC_reference (x, NULL_RTX);
8844 else
8845 return x;
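/* Worked example of the high/low split above (illustrative): for
   x = (plus (reg 3) (const_int 0x12345)) and extra = 0,
   low_int = ((0x12345 & 0xffff) ^ 0x8000) - 0x8000 = 0x2345 and
   high_int = 0x10000, so an addis of 1 is emitted and the returned
   address is (plus tmp 0x2345).  The XOR trick sign-extends the low
   16 bits, so 0x18000 splits as high_int = 0x20000 and
   low_int = -0x8000, keeping both halves representable.  */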
8848 /* Debug version of rs6000_legitimize_address. */
8849 static rtx
8850 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8852 rtx ret;
8853 rtx_insn *insns;
8855 start_sequence ();
8856 ret = rs6000_legitimize_address (x, oldx, mode);
8857 insns = get_insns ();
8858 end_sequence ();
8860 if (ret != x)
8862 fprintf (stderr,
8863 "\nrs6000_legitimize_address: mode %s, old code %s, "
8864 "new code %s, modified\n",
8865 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8866 GET_RTX_NAME (GET_CODE (ret)));
8868 fprintf (stderr, "Original address:\n");
8869 debug_rtx (x);
8871 fprintf (stderr, "oldx:\n");
8872 debug_rtx (oldx);
8874 fprintf (stderr, "New address:\n");
8875 debug_rtx (ret);
8877 if (insns)
8879 fprintf (stderr, "Insns added:\n");
8880 debug_rtx_list (insns, 20);
8883 else
8885 fprintf (stderr,
8886 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8887 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8889 debug_rtx (x);
8892 if (insns)
8893 emit_insn (insns);
8895 return ret;
8898 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8899 We need to emit DTP-relative relocations. */
8901 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8902 static void
8903 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8905 switch (size)
8907 case 4:
8908 fputs ("\t.long\t", file);
8909 break;
8910 case 8:
8911 fputs (DOUBLE_INT_ASM_OP, file);
8912 break;
8913 default:
8914 gcc_unreachable ();
8916 output_addr_const (file, x);
8917 if (TARGET_ELF)
8918 fputs ("@dtprel+0x8000", file);
8919 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8921 switch (SYMBOL_REF_TLS_MODEL (x))
8923 case 0:
8924 break;
8925 case TLS_MODEL_LOCAL_EXEC:
8926 fputs ("@le", file);
8927 break;
8928 case TLS_MODEL_INITIAL_EXEC:
8929 fputs ("@ie", file);
8930 break;
8931 case TLS_MODEL_GLOBAL_DYNAMIC:
8932 case TLS_MODEL_LOCAL_DYNAMIC:
8933 fputs ("@m", file);
8934 break;
8935 default:
8936 gcc_unreachable ();
8941 /* Return true if X is a symbol that refers to real (rather than emulated)
8942 TLS. */
8944 static bool
8945 rs6000_real_tls_symbol_ref_p (rtx x)
8947 return (GET_CODE (x) == SYMBOL_REF
8948 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8951 /* In the name of slightly smaller debug output, and to cater to
8952 general assembler lossage, recognize various UNSPEC sequences
8953 and turn them back into a direct symbol reference. */
8955 static rtx
8956 rs6000_delegitimize_address (rtx orig_x)
8958 rtx x, y, offset;
8960 orig_x = delegitimize_mem_from_attrs (orig_x);
8961 x = orig_x;
8962 if (MEM_P (x))
8963 x = XEXP (x, 0);
8965 y = x;
8966 if (TARGET_CMODEL != CMODEL_SMALL
8967 && GET_CODE (y) == LO_SUM)
8968 y = XEXP (y, 1);
8970 offset = NULL_RTX;
8971 if (GET_CODE (y) == PLUS
8972 && GET_MODE (y) == Pmode
8973 && CONST_INT_P (XEXP (y, 1)))
8975 offset = XEXP (y, 1);
8976 y = XEXP (y, 0);
8979 if (GET_CODE (y) == UNSPEC
8980 && XINT (y, 1) == UNSPEC_TOCREL)
8982 y = XVECEXP (y, 0, 0);
8984 #ifdef HAVE_AS_TLS
8985 /* Do not associate thread-local symbols with the original
8986 constant pool symbol. */
8987 if (TARGET_XCOFF
8988 && GET_CODE (y) == SYMBOL_REF
8989 && CONSTANT_POOL_ADDRESS_P (y)
8990 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8991 return orig_x;
8992 #endif
8994 if (offset != NULL_RTX)
8995 y = gen_rtx_PLUS (Pmode, y, offset);
8996 if (!MEM_P (orig_x))
8997 return y;
8998 else
8999 return replace_equiv_address_nv (orig_x, y);
9002 if (TARGET_MACHO
9003 && GET_CODE (orig_x) == LO_SUM
9004 && GET_CODE (XEXP (orig_x, 1)) == CONST)
9006 y = XEXP (XEXP (orig_x, 1), 0);
9007 if (GET_CODE (y) == UNSPEC
9008 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
9009 return XVECEXP (y, 0, 0);
9012 return orig_x;
9015 /* Return true if X shouldn't be emitted into the debug info.
9016 The linker doesn't like .toc section references from
9017 .debug_* sections, so reject .toc section symbols. */
9019 static bool
9020 rs6000_const_not_ok_for_debug_p (rtx x)
9022 if (GET_CODE (x) == UNSPEC)
9023 return true;
9024 if (GET_CODE (x) == SYMBOL_REF
9025 && CONSTANT_POOL_ADDRESS_P (x))
9027 rtx c = get_pool_constant (x);
9028 machine_mode cmode = get_pool_mode (x);
9029 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
9030 return true;
9033 return false;
9037 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
9039 static bool
9040 rs6000_legitimate_combined_insn (rtx_insn *insn)
9042 int icode = INSN_CODE (insn);
9044 /* Reject creating doloop insns. Combine should not be allowed
9045 to create these for a number of reasons:
9046 1) In a nested loop, if combine creates one of these in an
9047 outer loop and the register allocator happens to allocate ctr
9048 to the outer loop insn, then the inner loop can't use ctr.
9049 Inner loops ought to be more highly optimized.
9050 2) Combine often wants to create one of these from what was
9051 originally a three insn sequence, first combining the three
9052 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
9053 allocated ctr, the splitter takes us back to the three insn
9054 sequence. It's better to stop combine at the two insn
9055 sequence.
9056 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
9057 insns, the register allocator sometimes uses floating point
9058 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9059 jump insn and output reloads are not implemented for jumps,
9060 the ctrsi/ctrdi splitters need to handle all possible cases.
9061 That's a pain, and it gets to be seriously difficult when a
9062 splitter that runs after reload needs memory to transfer from
9063 a gpr to fpr. See PR70098 and PR71763 which are not fixed
9064 for the difficult case. It's better to not create problems
9065 in the first place. */
9066 if (icode != CODE_FOR_nothing
9067 && (icode == CODE_FOR_ctrsi_internal1
9068 || icode == CODE_FOR_ctrdi_internal1
9069 || icode == CODE_FOR_ctrsi_internal2
9070 || icode == CODE_FOR_ctrdi_internal2))
9071 return false;
9073 return true;
9076 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9078 static GTY(()) rtx rs6000_tls_symbol;
9079 static rtx
9080 rs6000_tls_get_addr (void)
9082 if (!rs6000_tls_symbol)
9083 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
9085 return rs6000_tls_symbol;
9088 /* Construct the SYMBOL_REF for TLS GOT references. */
9090 static GTY(()) rtx rs6000_got_symbol;
9091 static rtx
9092 rs6000_got_sym (void)
9094 if (!rs6000_got_symbol)
9096 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9097 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
9098 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
9101 return rs6000_got_symbol;
9104 /* AIX Thread-Local Address support. */
9106 static rtx
9107 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
9109 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
9110 const char *name;
9111 char *tlsname;
9113 name = XSTR (addr, 0);
9114 /* Append TLS CSECT qualifier, unless the symbol is already qualified
9115 or the symbol will be in the TLS private data section. */
9116 if (name[strlen (name) - 1] != ']'
9117 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
9118 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
9120 tlsname = XALLOCAVEC (char, strlen (name) + 4);
9121 strcpy (tlsname, name);
9122 strcat (tlsname,
9123 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
9124 tlsaddr = copy_rtx (addr);
9125 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
9127 else
9128 tlsaddr = addr;
9130 /* Place addr into TOC constant pool. */
9131 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
9133 /* Output the TOC entry and create the MEM referencing the value. */
9134 if (constant_pool_expr_p (XEXP (sym, 0))
9135 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
9137 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
9138 mem = gen_const_mem (Pmode, tocref);
9139 set_mem_alias_set (mem, get_TOC_alias_set ());
9141 else
9142 return sym;
9144 /* Use global-dynamic for local-dynamic. */
9145 if (model == TLS_MODEL_GLOBAL_DYNAMIC
9146 || model == TLS_MODEL_LOCAL_DYNAMIC)
9148 /* Create new TOC reference for @m symbol. */
9149 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
9150 tlsname = XALLOCAVEC (char, strlen (name) + 1);
9151 strcpy (tlsname, "*LCM");
9152 strcat (tlsname, name + 3);
9153 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
9154 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
9155 tocref = create_TOC_reference (modaddr, NULL_RTX);
9156 rtx modmem = gen_const_mem (Pmode, tocref);
9157 set_mem_alias_set (modmem, get_TOC_alias_set ());
9159 rtx modreg = gen_reg_rtx (Pmode);
9160 emit_insn (gen_rtx_SET (modreg, modmem));
9162 tmpreg = gen_reg_rtx (Pmode);
9163 emit_insn (gen_rtx_SET (tmpreg, mem));
9165 dest = gen_reg_rtx (Pmode);
9166 if (TARGET_32BIT)
9167 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
9168 else
9169 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
9170 return dest;
9172 /* Obtain the TLS pointer: via a call on 32-bit, from GPR 13 on 64-bit. */
9173 else if (TARGET_32BIT)
9175 tlsreg = gen_reg_rtx (SImode);
9176 emit_insn (gen_tls_get_tpointer (tlsreg));
9178 else
9179 tlsreg = gen_rtx_REG (DImode, 13);
9181 /* Load the TOC value into a temporary register. */
9182 tmpreg = gen_reg_rtx (Pmode);
9183 emit_insn (gen_rtx_SET (tmpreg, mem));
9184 set_unique_reg_note (get_last_insn (), REG_EQUAL,
9185 gen_rtx_MINUS (Pmode, addr, tlsreg));
9187 /* Add the TOC symbol value to the TLS pointer. */
9188 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
9190 return dest;
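/* As an illustrative sketch (not the exact emitted RTL), the non-call
   path above corresponds to an AIX sequence of roughly this shape,
   where the TOC entry holds the thread-relative offset of the symbol:

        lwz   rT, sym[TC](r2)    # load the offset from the TOC
        add   rD, rT, r13        # add the thread pointer (64-bit; the
                                 # 32-bit path obtains the pointer via a
                                 # __get_tpointer call instead)

   Exact mnemonics and register choices depend on -m32/-m64 and the
   TLS model.  */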
9193 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9194 this (thread-local) address. */
9196 static rtx
9197 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
9199 rtx dest, insn;
9201 if (TARGET_XCOFF)
9202 return rs6000_legitimize_tls_address_aix (addr, model);
9204 dest = gen_reg_rtx (Pmode);
9205 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
9207 rtx tlsreg;
9209 if (TARGET_64BIT)
9211 tlsreg = gen_rtx_REG (Pmode, 13);
9212 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
9214 else
9216 tlsreg = gen_rtx_REG (Pmode, 2);
9217 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
9219 emit_insn (insn);
9221 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
9223 rtx tlsreg, tmp;
9225 tmp = gen_reg_rtx (Pmode);
9226 if (TARGET_64BIT)
9228 tlsreg = gen_rtx_REG (Pmode, 13);
9229 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
9231 else
9233 tlsreg = gen_rtx_REG (Pmode, 2);
9234 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
9236 emit_insn (insn);
9237 if (TARGET_64BIT)
9238 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
9239 else
9240 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
9241 emit_insn (insn);
9243 else
9245 rtx r3, got, tga, tmp1, tmp2, call_insn;
9247 /* We currently use relocations like @got@tlsgd for tls, which
9248 means the linker will handle allocation of tls entries, placing
9249 them in the .got section. So use a pointer to the .got section,
9250 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9251 or to secondary GOT sections used by 32-bit -fPIC. */
9252 if (TARGET_64BIT)
9253 got = gen_rtx_REG (Pmode, 2);
9254 else
9256 if (flag_pic == 1)
9257 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
9258 else
9260 rtx gsym = rs6000_got_sym ();
9261 got = gen_reg_rtx (Pmode);
9262 if (flag_pic == 0)
9263 rs6000_emit_move (got, gsym, Pmode);
9264 else
9266 rtx mem, lab;
9268 tmp1 = gen_reg_rtx (Pmode);
9269 tmp2 = gen_reg_rtx (Pmode);
9270 mem = gen_const_mem (Pmode, tmp1);
9271 lab = gen_label_rtx ();
9272 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9273 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9274 if (TARGET_LINK_STACK)
9275 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9276 emit_move_insn (tmp2, mem);
9277 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9278 set_unique_reg_note (last, REG_EQUAL, gsym);
9283 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9285 tga = rs6000_tls_get_addr ();
9286 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9287 const0_rtx, Pmode);
9289 r3 = gen_rtx_REG (Pmode, 3);
9290 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9292 if (TARGET_64BIT)
9293 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9294 else
9295 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9297 else if (DEFAULT_ABI == ABI_V4)
9298 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9299 else
9300 gcc_unreachable ();
9301 call_insn = last_call_insn ();
9302 PATTERN (call_insn) = insn;
9303 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9304 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9305 pic_offset_table_rtx);
9307 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9309 tga = rs6000_tls_get_addr ();
9310 tmp1 = gen_reg_rtx (Pmode);
9311 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9312 const0_rtx, Pmode);
9314 r3 = gen_rtx_REG (Pmode, 3);
9315 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9317 if (TARGET_64BIT)
9318 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9319 else
9320 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9322 else if (DEFAULT_ABI == ABI_V4)
9323 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9324 else
9325 gcc_unreachable ();
9326 call_insn = last_call_insn ();
9327 PATTERN (call_insn) = insn;
9328 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9329 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9330 pic_offset_table_rtx);
9332 if (rs6000_tls_size == 16)
9334 if (TARGET_64BIT)
9335 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9336 else
9337 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9339 else if (rs6000_tls_size == 32)
9341 tmp2 = gen_reg_rtx (Pmode);
9342 if (TARGET_64BIT)
9343 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9344 else
9345 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9346 emit_insn (insn);
9347 if (TARGET_64BIT)
9348 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9349 else
9350 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9352 else
9354 tmp2 = gen_reg_rtx (Pmode);
9355 if (TARGET_64BIT)
9356 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9357 else
9358 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9359 emit_insn (insn);
9360 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9362 emit_insn (insn);
9364 else
9366 /* Initial-exec (IE), or local-exec (LE) with a 64-bit offset. */
9367 tmp2 = gen_reg_rtx (Pmode);
9368 if (TARGET_64BIT)
9369 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9370 else
9371 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9372 emit_insn (insn);
9373 if (TARGET_64BIT)
9374 insn = gen_tls_tls_64 (dest, tmp2, addr);
9375 else
9376 insn = gen_tls_tls_32 (dest, tmp2, addr);
9377 emit_insn (insn);
9381 return dest;
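/* For reference, a rough sketch of the ELF code generated by the two
   local-exec sizes handled above (relocation names per the PowerPC
   ELF TLS ABI; register numbers illustrative):

     -mtls-size=16:   addi  rD, r13, sym@tprel
     -mtls-size=32:   addis rT, r13, sym@tprel@ha
                      addi  rD, rT,  sym@tprel@l

   while global-dynamic materializes the argument from the GOT and
   calls __tls_get_addr, roughly:

                      addi  r3, r2, sym@got@tlsgd
                      bl    __tls_get_addr(sym@tlsgd)  */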
9384 /* Only create the global variable for the stack protect guard if we are using
9385 the global flavor of that guard. */
9386 static tree
9387 rs6000_init_stack_protect_guard (void)
9389 if (rs6000_stack_protector_guard == SSP_GLOBAL)
9390 return default_stack_protect_guard ();
9392 return NULL_TREE;
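/* That is, with -mstack-protector-guard=global the generic
   __stack_chk_guard variable is used; with the TLS flavor the guard
   is instead read at an offset from the thread pointer register, so
   no tree needs to be created here.  */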
9395 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9397 static bool
9398 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9400 if (GET_CODE (x) == HIGH
9401 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9402 return true;
9404 /* A TLS symbol in the TOC cannot contain a sum. */
9405 if (GET_CODE (x) == CONST
9406 && GET_CODE (XEXP (x, 0)) == PLUS
9407 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9408 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9409 return true;
9411 /* Do not place an ELF TLS symbol in the constant pool. */
9412 return TARGET_ELF && tls_referenced_p (x);
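/* Example of an rtx rejected above (a TLS symbol plus an offset must
   not be spilled to the TOC/constant pool):

     (const (plus (symbol_ref "v" [flags ... tls]) (const_int 4)))  */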
9415 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9416 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9417 can be addressed relative to the toc pointer. */
9419 static bool
9420 use_toc_relative_ref (rtx sym, machine_mode mode)
9422 return ((constant_pool_expr_p (sym)
9423 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9424 get_pool_mode (sym)))
9425 || (TARGET_CMODEL == CMODEL_MEDIUM
9426 && SYMBOL_REF_LOCAL_P (sym)
9427 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
9430 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9431 replace the input X, or the original X if no replacement is called for.
9432 The output parameter *WIN is 1 if the calling macro should goto WIN,
9433 0 if it should not.
9435 For RS/6000, we wish to handle large displacements off a base
9436 register by splitting the addend across an addi/addis pair and the mem insn.
9437 This cuts the number of extra insns needed from 3 to 1.
9439 On Darwin, we use this to generate code for floating point constants.
9440 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9441 The Darwin code is inside #if TARGET_MACHO because only then are the
9442 machopic_* functions defined. */
9443 static rtx
9444 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9445 int opnum, int type,
9446 int ind_levels ATTRIBUTE_UNUSED, int *win)
9448 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9449 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9451 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9452 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9453 if (reg_offset_p
9454 && opnum == 1
9455 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9456 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9457 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9458 && TARGET_P9_VECTOR)
9459 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9460 && TARGET_P9_VECTOR)))
9461 reg_offset_p = false;
9463 /* We must recognize output that we have already generated ourselves. */
9464 if (GET_CODE (x) == PLUS
9465 && GET_CODE (XEXP (x, 0)) == PLUS
9466 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9467 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9468 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9470 if (TARGET_DEBUG_ADDR)
9472 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9473 debug_rtx (x);
9475 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9476 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9477 opnum, (enum reload_type) type);
9478 *win = 1;
9479 return x;
9482 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9483 if (GET_CODE (x) == LO_SUM
9484 && GET_CODE (XEXP (x, 0)) == HIGH)
9486 if (TARGET_DEBUG_ADDR)
9488 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9489 debug_rtx (x);
9491 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9492 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9493 opnum, (enum reload_type) type);
9494 *win = 1;
9495 return x;
9498 #if TARGET_MACHO
9499 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9500 && GET_CODE (x) == LO_SUM
9501 && GET_CODE (XEXP (x, 0)) == PLUS
9502 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9503 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9504 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9505 && machopic_operand_p (XEXP (x, 1)))
9507 /* Result of a previous invocation of this function on a Darwin
9508 floating-point constant. */
9509 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9510 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9511 opnum, (enum reload_type) type);
9512 *win = 1;
9513 return x;
9515 #endif
9517 if (TARGET_CMODEL != CMODEL_SMALL
9518 && reg_offset_p
9519 && !quad_offset_p
9520 && small_toc_ref (x, VOIDmode))
9522 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9523 x = gen_rtx_LO_SUM (Pmode, hi, x);
9524 if (TARGET_DEBUG_ADDR)
9526 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9527 debug_rtx (x);
9529 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9530 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9531 opnum, (enum reload_type) type);
9532 *win = 1;
9533 return x;
9536 if (GET_CODE (x) == PLUS
9537 && REG_P (XEXP (x, 0))
9538 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9539 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9540 && CONST_INT_P (XEXP (x, 1))
9541 && reg_offset_p
9542 && !PAIRED_VECTOR_MODE (mode)
9543 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9545 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9546 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9547 HOST_WIDE_INT high
9548 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9550 /* Check for 32-bit overflow or quad addresses with one of the
9551 four least significant bits set. */
9552 if (high + low != val
9553 || (quad_offset_p && (low & 0xf)))
9555 *win = 0;
9556 return x;
9559 /* Reload the high part into a base reg; leave the low part
9560 in the mem directly. */
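/* Worked example: for val = 100000 (0x186a0), low is the
   sign-extended bottom 16 bits, -31072 (0xffff86a0), and high is
   val - low = 131072 (0x20000).  So an address like

     (plus (reg rB) (const_int 100000))

   is reloaded as an addis of 2 into a base register, leaving a
   -31072 displacement in the mem itself.  */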
9562 x = gen_rtx_PLUS (GET_MODE (x),
9563 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9564 GEN_INT (high)),
9565 GEN_INT (low));
9567 if (TARGET_DEBUG_ADDR)
9569 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9570 debug_rtx (x);
9572 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9573 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9574 opnum, (enum reload_type) type);
9575 *win = 1;
9576 return x;
9579 if (GET_CODE (x) == SYMBOL_REF
9580 && reg_offset_p
9581 && !quad_offset_p
9582 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9583 && !PAIRED_VECTOR_MODE (mode)
9584 #if TARGET_MACHO
9585 && DEFAULT_ABI == ABI_DARWIN
9586 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9587 && machopic_symbol_defined_p (x)
9588 #else
9589 && DEFAULT_ABI == ABI_V4
9590 && !flag_pic
9591 #endif
9592 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9593 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9594 without fprs.
9595 ??? Assume floating point reg based on mode? This assumption is
9596 violated by e.g. a powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9597 where reload ends up doing a DFmode load of a constant from
9598 mem using two gprs. Unfortunately, at this point reload
9599 hasn't yet selected regs so poking around in reload data
9600 won't help and even if we could figure out the regs reliably,
9601 we'd still want to allow this transformation when the mem is
9602 naturally aligned. Since we say the address is good here, we
9603 can't disable offsets from LO_SUMs in mem_operand_gpr.
9604 FIXME: Allow offset from lo_sum for other modes too, when
9605 mem is sufficiently aligned.
9607 Also disallow this if the type can go in VMX/Altivec registers, since
9608 those registers do not have d-form (reg+offset) address modes. */
9609 && !reg_addr[mode].scalar_in_vmx_p
9610 && mode != TFmode
9611 && mode != TDmode
9612 && mode != IFmode
9613 && mode != KFmode
9614 && (mode != TImode || !TARGET_VSX)
9615 && mode != PTImode
9616 && (mode != DImode || TARGET_POWERPC64)
9617 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9618 || (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)))
9620 #if TARGET_MACHO
9621 if (flag_pic)
9623 rtx offset = machopic_gen_offset (x);
9624 x = gen_rtx_LO_SUM (GET_MODE (x),
9625 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9626 gen_rtx_HIGH (Pmode, offset)), offset);
9628 else
9629 #endif
9630 x = gen_rtx_LO_SUM (GET_MODE (x),
9631 gen_rtx_HIGH (Pmode, x), x);
9633 if (TARGET_DEBUG_ADDR)
9635 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9636 debug_rtx (x);
9638 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9639 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9640 opnum, (enum reload_type) type);
9641 *win = 1;
9642 return x;
9645 /* Reload an offset address wrapped by an AND that represents the
9646 masking of the lower bits. Strip the outer AND and let reload
9647 convert the offset address into an indirect address. For VSX,
9648 force reload to create the address with an AND in a separate
9649 register, because we can't guarantee an altivec register will
9650 be used. */
9651 if (VECTOR_MEM_ALTIVEC_P (mode)
9652 && GET_CODE (x) == AND
9653 && GET_CODE (XEXP (x, 0)) == PLUS
9654 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9655 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9656 && GET_CODE (XEXP (x, 1)) == CONST_INT
9657 && INTVAL (XEXP (x, 1)) == -16)
9659 x = XEXP (x, 0);
9660 *win = 1;
9661 return x;
9664 if (TARGET_TOC
9665 && reg_offset_p
9666 && !quad_offset_p
9667 && GET_CODE (x) == SYMBOL_REF
9668 && use_toc_relative_ref (x, mode))
9670 x = create_TOC_reference (x, NULL_RTX);
9671 if (TARGET_CMODEL != CMODEL_SMALL)
9673 if (TARGET_DEBUG_ADDR)
9675 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9676 debug_rtx (x);
9678 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9679 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9680 opnum, (enum reload_type) type);
9682 *win = 1;
9683 return x;
9685 *win = 0;
9686 return x;
9689 /* Debug version of rs6000_legitimize_reload_address. */
9690 static rtx
9691 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9692 int opnum, int type,
9693 int ind_levels, int *win)
9695 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9696 ind_levels, win);
9697 fprintf (stderr,
9698 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9699 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9700 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9701 debug_rtx (x);
9703 if (x == ret)
9704 fprintf (stderr, "Same address returned\n");
9705 else if (!ret)
9706 fprintf (stderr, "NULL returned\n");
9707 else
9709 fprintf (stderr, "New address:\n");
9710 debug_rtx (ret);
9713 return ret;
9716 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9717 that is a valid memory address for an instruction.
9718 The MODE argument is the machine mode for the MEM expression
9719 that wants to use this address.
9721 On the RS/6000, there are four valid forms of address: a SYMBOL_REF that
9722 refers to a constant pool entry of an address (or the sum of it
9723 plus a constant), a short (16-bit signed) constant plus a register,
9724 the sum of two registers, or a register indirect, possibly with an
9725 auto-increment. For DFmode, DDmode and DImode with a constant plus
9726 register, we must ensure that both words are addressable, or that we are
9727 on PowerPC64 with the offset word-aligned.
9729 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9730 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9731 because adjacent memory cells are accessed by adding word-sized offsets
9732 during assembly output. */
9733 static bool
9734 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9736 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9737 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9739 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9740 if (VECTOR_MEM_ALTIVEC_P (mode)
9741 && GET_CODE (x) == AND
9742 && GET_CODE (XEXP (x, 1)) == CONST_INT
9743 && INTVAL (XEXP (x, 1)) == -16)
9744 x = XEXP (x, 0);
9746 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9747 return 0;
9748 if (legitimate_indirect_address_p (x, reg_ok_strict))
9749 return 1;
9750 if (TARGET_UPDATE
9751 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9752 && mode_supports_pre_incdec_p (mode)
9753 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9754 return 1;
9755 /* Handle restricted vector d-form offsets in ISA 3.0. */
9756 if (quad_offset_p)
9758 if (quad_address_p (x, mode, reg_ok_strict))
9759 return 1;
9761 else if (virtual_stack_registers_memory_p (x))
9762 return 1;
9764 else if (reg_offset_p)
9766 if (legitimate_small_data_p (mode, x))
9767 return 1;
9768 if (legitimate_constant_pool_address_p (x, mode,
9769 reg_ok_strict || lra_in_progress))
9770 return 1;
9771 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
9772 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
9773 return 1;
9776 /* For TImode, if we have TImode in VSX registers, only allow register
9777 indirect addresses. This will allow the values to go in either GPRs
9778 or VSX registers without reloading. The vector types would tend to
9779 go into VSX registers, so we allow REG+REG, while TImode seems
9780 somewhat split, in that some uses are GPR based, and some VSX based. */
9781 /* FIXME: We could loosen this by changing the following to
9782 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9783 but currently we cannot allow REG+REG addressing for TImode. See
9784 PR72827 for complete details on how this ends up hoodwinking DSE. */
9785 if (mode == TImode && TARGET_VSX)
9786 return 0;
9787 /* If not REG_OK_STRICT (i.e. before reload), allow any stack offset. */
9788 if (! reg_ok_strict
9789 && reg_offset_p
9790 && GET_CODE (x) == PLUS
9791 && GET_CODE (XEXP (x, 0)) == REG
9792 && (XEXP (x, 0) == virtual_stack_vars_rtx
9793 || XEXP (x, 0) == arg_pointer_rtx)
9794 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9795 return 1;
9796 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9797 return 1;
9798 if (!FLOAT128_2REG_P (mode)
9799 && ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9800 || TARGET_POWERPC64
9801 || (mode != DFmode && mode != DDmode))
9802 && (TARGET_POWERPC64 || mode != DImode)
9803 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9804 && mode != PTImode
9805 && !avoiding_indexed_address_p (mode)
9806 && legitimate_indexed_address_p (x, reg_ok_strict))
9807 return 1;
9808 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9809 && mode_supports_pre_modify_p (mode)
9810 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9811 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9812 reg_ok_strict, false)
9813 || (!avoiding_indexed_address_p (mode)
9814 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9815 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9816 return 1;
9817 if (reg_offset_p && !quad_offset_p
9818 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9819 return 1;
9820 return 0;
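/* Illustrative forms accepted above (all subject to the mode and
   target checks): register indirect (reg), reg plus 16-bit offset
   (plus (reg) (const_int 16)), indexed (plus (reg) (reg)),
   lo_sum forms such as (lo_sum (reg) (symbol_ref)), and
   pre-increment forms such as (pre_inc (reg)) when TARGET_UPDATE.  */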
9823 /* Debug version of rs6000_legitimate_address_p. */
9824 static bool
9825 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9826 bool reg_ok_strict)
9828 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9829 fprintf (stderr,
9830 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9831 "strict = %d, reload = %s, code = %s\n",
9832 ret ? "true" : "false",
9833 GET_MODE_NAME (mode),
9834 reg_ok_strict,
9835 (reload_completed ? "after" : "before"),
9836 GET_RTX_NAME (GET_CODE (x)));
9837 debug_rtx (x);
9839 return ret;
9842 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9844 static bool
9845 rs6000_mode_dependent_address_p (const_rtx addr,
9846 addr_space_t as ATTRIBUTE_UNUSED)
9848 return rs6000_mode_dependent_address_ptr (addr);
9851 /* Go to LABEL if ADDR (a legitimate address expression)
9852 has an effect that depends on the machine mode it is used for.
9854 On the RS/6000 this is true of all integral offsets (since AltiVec
9855 and VSX modes don't allow them) and of pre-increment and pre-decrement addresses.
9857 ??? Except that due to conceptual problems in offsettable_address_p
9858 we can't really report the problems of integral offsets. So leave
9859 this assuming that the adjustable offset must be valid for the
9860 sub-words of a TFmode operand, which is what we had before. */
9862 static bool
9863 rs6000_mode_dependent_address (const_rtx addr)
9865 switch (GET_CODE (addr))
9867 case PLUS:
9868 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9869 is considered a legitimate address before reload, so there
9870 are no offset restrictions in that case. Note that this
9871 condition is safe in strict mode because any address involving
9872 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9873 been rejected as illegitimate. */
9874 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9875 && XEXP (addr, 0) != arg_pointer_rtx
9876 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9878 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9879 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9881 break;
9883 case LO_SUM:
9884 /* Anything in the constant pool is sufficiently aligned that
9885 all bytes have the same high part address. */
9886 return !legitimate_constant_pool_address_p (addr, QImode, false);
9888 /* Auto-increment cases are now treated generically in recog.c. */
9889 case PRE_MODIFY:
9890 return TARGET_UPDATE;
9892 /* AND is only allowed in Altivec loads. */
9893 case AND:
9894 return true;
9896 default:
9897 break;
9900 return false;
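/* Example: on 32-bit, (plus (reg) (const_int 32764)) is
   mode-dependent, since 32764 + 0x8000 = 65532 >= 0x10000 - 12; a
   multi-word access at that address would need sub-word offsets past
   the 16-bit displacement limit.  */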
9903 /* Debug version of rs6000_mode_dependent_address. */
9904 static bool
9905 rs6000_debug_mode_dependent_address (const_rtx addr)
9907 bool ret = rs6000_mode_dependent_address (addr);
9909 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9910 ret ? "true" : "false");
9911 debug_rtx (addr);
9913 return ret;
9916 /* Implement FIND_BASE_TERM. */
9919 rs6000_find_base_term (rtx op)
9921 rtx base;
9923 base = op;
9924 if (GET_CODE (base) == CONST)
9925 base = XEXP (base, 0);
9926 if (GET_CODE (base) == PLUS)
9927 base = XEXP (base, 0);
9928 if (GET_CODE (base) == UNSPEC)
9929 switch (XINT (base, 1))
9931 case UNSPEC_TOCREL:
9932 case UNSPEC_MACHOPIC_OFFSET:
9933 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9934 for aliasing purposes. */
9935 return XVECEXP (base, 0, 0);
9938 return op;
9941 /* More elaborate version of recog's offsettable_memref_p predicate
9942 that works around the ??? note of rs6000_mode_dependent_address.
9943 In particular it accepts
9945 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9947 in 32-bit mode, which the recog predicate rejects. */
9949 static bool
9950 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
9952 bool worst_case;
9954 if (!MEM_P (op))
9955 return false;
9957 /* First mimic offsettable_memref_p. */
9958 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
9959 return true;
9961 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9962 the latter predicate knows nothing about the mode of the memory
9963 reference and, therefore, assumes that it is the largest supported
9964 mode (TFmode). As a consequence, legitimate offsettable memory
9965 references are rejected. rs6000_legitimate_offset_address_p contains
9966 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9967 at least with a little bit of help here given that we know the
9968 actual registers used. */
9969 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9970 || GET_MODE_SIZE (reg_mode) == 4);
9971 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9972 true, worst_case);
9975 /* Determine the reassociation width to be used in reassociate_bb.
9976 This takes into account how many parallel operations we
9977 can actually do of a given type, and also the latency.
9979 int add/sub 6/cycle
9980 mul 2/cycle
9981 vect add/sub/mul 2/cycle
9982 fp add/sub/mul 2/cycle
9983 dfp 1/cycle
9986 static int
9987 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9988 machine_mode mode)
9990 switch (rs6000_cpu)
9992 case PROCESSOR_POWER8:
9993 case PROCESSOR_POWER9:
9994 if (DECIMAL_FLOAT_MODE_P (mode))
9995 return 1;
9996 if (VECTOR_MODE_P (mode))
9997 return 4;
9998 if (INTEGRAL_MODE_P (mode))
9999 return opc == MULT_EXPR ? 4 : 6;
10000 if (FLOAT_MODE_P (mode))
10001 return 4;
10002 break;
10003 default:
10004 break;
10006 return 1;
10009 /* Change register usage conditional on target flags. */
10010 static void
10011 rs6000_conditional_register_usage (void)
10013 int i;
10015 if (TARGET_DEBUG_TARGET)
10016 fprintf (stderr, "rs6000_conditional_register_usage called\n");
10018 /* Set MQ register fixed (already call_used) so that it will not be
10019 allocated. */
10020 fixed_regs[64] = 1;
10022 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
10023 if (TARGET_64BIT)
10024 fixed_regs[13] = call_used_regs[13]
10025 = call_really_used_regs[13] = 1;
10027 /* Conditionally disable FPRs. */
10028 if (TARGET_SOFT_FLOAT)
10029 for (i = 32; i < 64; i++)
10030 fixed_regs[i] = call_used_regs[i]
10031 = call_really_used_regs[i] = 1;
10033 /* The TOC register is not killed across calls in a way that is
10034 visible to the compiler. */
10035 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10036 call_really_used_regs[2] = 0;
10038 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
10039 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10041 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
10042 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10043 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10044 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10046 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
10047 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10048 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10049 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10051 if (TARGET_TOC && TARGET_MINIMAL_TOC)
10052 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10053 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10055 if (!TARGET_ALTIVEC && !TARGET_VSX)
10057 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
10058 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10059 call_really_used_regs[VRSAVE_REGNO] = 1;
10062 if (TARGET_ALTIVEC || TARGET_VSX)
10063 global_regs[VSCR_REGNO] = 1;
10065 if (TARGET_ALTIVEC_ABI)
10067 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
10068 call_used_regs[i] = call_really_used_regs[i] = 1;
10070 /* AIX reserves VR20:31 in non-extended ABI mode. */
10071 if (TARGET_XCOFF)
10072 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
10073 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10078 /* Output insns to set DEST equal to the constant SOURCE as a series of
10079 lis, ori and shl instructions and return TRUE. */
10081 bool
10082 rs6000_emit_set_const (rtx dest, rtx source)
10084 machine_mode mode = GET_MODE (dest);
10085 rtx temp, set;
10086 rtx_insn *insn;
10087 HOST_WIDE_INT c;
10089 gcc_checking_assert (CONST_INT_P (source));
10090 c = INTVAL (source);
10091 switch (mode)
10093 case E_QImode:
10094 case E_HImode:
10095 emit_insn (gen_rtx_SET (dest, source));
10096 return true;
10098 case E_SImode:
10099 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
10101 emit_insn (gen_rtx_SET (copy_rtx (temp),
10102 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
10103 emit_insn (gen_rtx_SET (dest,
10104 gen_rtx_IOR (SImode, copy_rtx (temp),
10105 GEN_INT (c & 0xffff))));
10106 break;
10108 case E_DImode:
10109 if (!TARGET_POWERPC64)
10111 rtx hi, lo;
10113 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
10114 DImode);
10115 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
10116 DImode);
10117 emit_move_insn (hi, GEN_INT (c >> 32));
10118 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
10119 emit_move_insn (lo, GEN_INT (c));
10121 else
10122 rs6000_emit_set_long_const (dest, c);
10123 break;
10125 default:
10126 gcc_unreachable ();
10129 insn = get_last_insn ();
10130 set = single_set (insn);
10131 if (! CONSTANT_P (SET_SRC (set)))
10132 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
10134 return true;
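/* Example: for SImode c = 0x12345678 the code above emits the usual
   two-insn sequence

        lis   rT, 0x1234        # rT = 0x12340000
        ori   rD, rT, 0x5678    # rD = 0x12345678  */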
10137 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
10138 Output insns to set DEST equal to the constant C as a series of
10139 lis, ori and shl instructions. */
10141 static void
10142 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
10144 rtx temp;
10145 HOST_WIDE_INT ud1, ud2, ud3, ud4;
10147 ud1 = c & 0xffff;
10148 c = c >> 16;
10149 ud2 = c & 0xffff;
10150 c = c >> 16;
10151 ud3 = c & 0xffff;
10152 c = c >> 16;
10153 ud4 = c & 0xffff;
10155 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
10156 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
10157 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
10159 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
10160 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
10162 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10164 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10165 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10166 if (ud1 != 0)
10167 emit_move_insn (dest,
10168 gen_rtx_IOR (DImode, copy_rtx (temp),
10169 GEN_INT (ud1)));
10171 else if (ud3 == 0 && ud4 == 0)
10173 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10175 gcc_assert (ud2 & 0x8000);
10176 emit_move_insn (copy_rtx (temp),
10177 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10178 if (ud1 != 0)
10179 emit_move_insn (copy_rtx (temp),
10180 gen_rtx_IOR (DImode, copy_rtx (temp),
10181 GEN_INT (ud1)));
10182 emit_move_insn (dest,
10183 gen_rtx_ZERO_EXTEND (DImode,
10184 gen_lowpart (SImode,
10185 copy_rtx (temp))));
10187 else if ((ud4 == 0xffff && (ud3 & 0x8000))
10188 || (ud4 == 0 && ! (ud3 & 0x8000)))
10190 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10192 emit_move_insn (copy_rtx (temp),
10193 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
10194 if (ud2 != 0)
10195 emit_move_insn (copy_rtx (temp),
10196 gen_rtx_IOR (DImode, copy_rtx (temp),
10197 GEN_INT (ud2)));
10198 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10199 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10200 GEN_INT (16)));
10201 if (ud1 != 0)
10202 emit_move_insn (dest,
10203 gen_rtx_IOR (DImode, copy_rtx (temp),
10204 GEN_INT (ud1)));
10206 else
10208 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10210 emit_move_insn (copy_rtx (temp),
10211 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
10212 if (ud3 != 0)
10213 emit_move_insn (copy_rtx (temp),
10214 gen_rtx_IOR (DImode, copy_rtx (temp),
10215 GEN_INT (ud3)));
10217 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
10218 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10219 GEN_INT (32)));
10220 if (ud2 != 0)
10221 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10222 gen_rtx_IOR (DImode, copy_rtx (temp),
10223 GEN_INT (ud2 << 16)));
10224 if (ud1 != 0)
10225 emit_move_insn (dest,
10226 gen_rtx_IOR (DImode, copy_rtx (temp),
10227 GEN_INT (ud1)));
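/* Example for the general case above (all four halfwords nonzero):
   c = 0x123456789abcdef0 becomes roughly

        lis   rT, 0x1234
        ori   rT, rT, 0x5678
        sldi  rT, rT, 32
        oris  rT, rT, 0x9abc
        ori   rD, rT, 0xdef0  */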
10231 /* Helper for the following function. Get rid of [r+r] memory refs
10232 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
10234 static void
10235 rs6000_eliminate_indexed_memrefs (rtx operands[2])
10237 if (GET_CODE (operands[0]) == MEM
10238 && GET_CODE (XEXP (operands[0], 0)) != REG
10239 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
10240 GET_MODE (operands[0]), false))
10241 operands[0]
10242 = replace_equiv_address (operands[0],
10243 copy_addr_to_reg (XEXP (operands[0], 0)));
10245 if (GET_CODE (operands[1]) == MEM
10246 && GET_CODE (XEXP (operands[1], 0)) != REG
10247 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
10248 GET_MODE (operands[1]), false))
10249 operands[1]
10250 = replace_equiv_address (operands[1],
10251 copy_addr_to_reg (XEXP (operands[1], 0)));
10254 /* Generate a vector of constants to permute MODE for a little-endian
10255 storage operation by swapping the two halves of a vector. */
10256 static rtvec
10257 rs6000_const_vec (machine_mode mode)
10259 int i, subparts;
10260 rtvec v;
10262 switch (mode)
10264 case E_V1TImode:
10265 subparts = 1;
10266 break;
10267 case E_V2DFmode:
10268 case E_V2DImode:
10269 subparts = 2;
10270 break;
10271 case E_V4SFmode:
10272 case E_V4SImode:
10273 subparts = 4;
10274 break;
10275 case E_V8HImode:
10276 subparts = 8;
10277 break;
10278 case E_V16QImode:
10279 subparts = 16;
10280 break;
10281 default:
10282 gcc_unreachable();
10285 v = rtvec_alloc (subparts);
10287 for (i = 0; i < subparts / 2; ++i)
10288 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10289 for (i = subparts / 2; i < subparts; ++i)
10290 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
10292 return v;
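/* E.g. for V4SImode this returns the element permutation
   { 2, 3, 0, 1 }, i.e. a swap of the two 64-bit halves.  */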
10295 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
10296 store operation. */
10297 void
10298 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
10300 /* Scalar permutations are easier to express in integer modes than in
10301 floating-point modes, so cast them here. We use V1TImode instead
10302 of TImode to ensure that the values don't go through GPRs. */
10303 if (FLOAT128_VECTOR_P (mode))
10305 dest = gen_lowpart (V1TImode, dest);
10306 source = gen_lowpart (V1TImode, source);
10307 mode = V1TImode;
10310 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
10311 scalar. */
10312 if (mode == TImode || mode == V1TImode)
10313 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
10314 GEN_INT (64))));
10315 else
10317 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10318 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
10322 /* Emit a little-endian load from vector memory location SOURCE to VSX
10323 register DEST in mode MODE. The load is done with two permuting
10324 insns that represent an lxvd2x and an xxpermdi. */
10325 void
10326 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10328 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10329 V1TImode). */
10330 if (mode == TImode || mode == V1TImode)
10332 mode = V2DImode;
10333 dest = gen_lowpart (V2DImode, dest);
10334 source = adjust_address (source, V2DImode, 0);
10337 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10338 rs6000_emit_le_vsx_permute (tmp, source, mode);
10339 rs6000_emit_le_vsx_permute (dest, tmp, mode);
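/* The two permutes model what the hardware does: on a little-endian
   target lxvd2x loads the two doublewords in swapped (big-endian)
   order, and the VEC_SELECT emitted above becomes a doubleword-swap
   xxpermdi (e.g. xxpermdi vsX,vsX,vsX,2) that swaps them back.
   Emitting both at expand time lets later passes cancel redundant
   pairs of swaps.  */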
10342 /* Emit a little-endian store to vector memory location DEST from VSX
10343 register SOURCE in mode MODE. The store is done with two permuting
10344 insns that represent an xxpermdi and an stxvd2x. */
10345 void
10346 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10348 /* This should never be called during or after LRA, because it does
10349 not re-permute the source register. It is intended only for use
10350 during expand. */
10351 gcc_assert (!lra_in_progress && !reload_completed);
10353 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10354 V1TImode). */
10355 if (mode == TImode || mode == V1TImode)
10357 mode = V2DImode;
10358 dest = adjust_address (dest, V2DImode, 0);
10359 source = gen_lowpart (V2DImode, source);
10362 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10363 rs6000_emit_le_vsx_permute (tmp, source, mode);
10364 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10367 /* Emit a sequence representing a little-endian VSX load or store,
10368 moving data from SOURCE to DEST in mode MODE. This is done
10369 separately from rs6000_emit_move to ensure it is called only
10370 during expand. LE VSX loads and stores introduced later are
10371 handled with a split. The expand-time RTL generation allows
10372 us to optimize away redundant pairs of register-permutes. */
10373 void
10374 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10376 gcc_assert (!BYTES_BIG_ENDIAN
10377 && VECTOR_MEM_VSX_P (mode)
10378 && !TARGET_P9_VECTOR
10379 && !gpr_or_gpr_p (dest, source)
10380 && (MEM_P (source) ^ MEM_P (dest)));
10382 if (MEM_P (source))
10384 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10385 rs6000_emit_le_vsx_load (dest, source, mode);
10387 else
10389 if (!REG_P (source))
10390 source = force_reg (mode, source);
10391 rs6000_emit_le_vsx_store (dest, source, mode);
10395 /* Return whether an SFmode or SImode move can be done without converting one
10396 mode to another. This arises when we have:
10398 (SUBREG:SF (REG:SI ...))
10399 (SUBREG:SI (REG:SF ...))
10401 and one of the values is in a floating point/vector register, where SFmode
10402 scalars are stored in DFmode format. */
10404 bool
10405 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10407 if (TARGET_ALLOW_SF_SUBREG)
10408 return true;
10410 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10411 return true;
10413 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10414 return true;
10416 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
10417 if (SUBREG_P (dest))
10419 rtx dest_subreg = SUBREG_REG (dest);
10420 rtx src_subreg = SUBREG_REG (src);
10421 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10424 return false;
10428 /* Helper function to change moves with:
10430 (SUBREG:SF (REG:SI)) and
10431 (SUBREG:SI (REG:SF))
10433 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10434 values are stored as DFmode values in the VSX registers. We need to convert
10435 the bits before we can use a direct move or operate on the bits in the
10436 vector register as an integer type.
10438 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
10440 static bool
10441 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10443 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
10444 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10445 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10447 rtx inner_source = SUBREG_REG (source);
10448 machine_mode inner_mode = GET_MODE (inner_source);
10450 if (mode == SImode && inner_mode == SFmode)
10452 emit_insn (gen_movsi_from_sf (dest, inner_source));
10453 return true;
10456 if (mode == SFmode && inner_mode == SImode)
10458 emit_insn (gen_movsf_from_si (dest, inner_source));
10459 return true;
10463 return false;
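/* E.g. (set (reg:SI) (subreg:SI (reg:SF))) cannot be a plain direct
   move, because the SF value sits in the VSX register in DF format;
   the movsi_from_sf pattern inserts the needed format conversion
   before the bits are moved to a GPR.  */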
10466 /* Emit a move from SOURCE to DEST in mode MODE. */
10467 void
10468 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10470 rtx operands[2];
10471 operands[0] = dest;
10472 operands[1] = source;
10474 if (TARGET_DEBUG_ADDR)
10476 fprintf (stderr,
10477 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
10478 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10479 GET_MODE_NAME (mode),
10480 lra_in_progress,
10481 reload_completed,
10482 can_create_pseudo_p ());
10483 debug_rtx (dest);
10484 fprintf (stderr, "source:\n");
10485 debug_rtx (source);
10488 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
10489 if (CONST_WIDE_INT_P (operands[1])
10490 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10492 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10493 gcc_unreachable ();
10496 /* See if we need to special case SImode/SFmode SUBREG moves. */
10497 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10498 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10499 return;
10501 /* Check if GCC is setting up a block move that will end up using FP
10502 registers as temporaries. We must make sure this is acceptable. */
10503 if (GET_CODE (operands[0]) == MEM
10504 && GET_CODE (operands[1]) == MEM
10505 && mode == DImode
10506 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
10507 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
10508 && ! (rs6000_slow_unaligned_access (SImode,
10509 (MEM_ALIGN (operands[0]) > 32
10510 ? 32 : MEM_ALIGN (operands[0])))
10511 || rs6000_slow_unaligned_access (SImode,
10512 (MEM_ALIGN (operands[1]) > 32
10513 ? 32 : MEM_ALIGN (operands[1]))))
10514 && ! MEM_VOLATILE_P (operands [0])
10515 && ! MEM_VOLATILE_P (operands [1]))
10517 emit_move_insn (adjust_address (operands[0], SImode, 0),
10518 adjust_address (operands[1], SImode, 0));
10519 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10520 adjust_address (copy_rtx (operands[1]), SImode, 4));
10521 return;
10524 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10525 && !gpc_reg_operand (operands[1], mode))
10526 operands[1] = force_reg (mode, operands[1]);
10528 /* Recognize the case where operand[1] is a reference to thread-local
10529 data and load its address to a register. */
10530 if (tls_referenced_p (operands[1]))
10532 enum tls_model model;
10533 rtx tmp = operands[1];
10534 rtx addend = NULL;
10536 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10538 addend = XEXP (XEXP (tmp, 0), 1);
10539 tmp = XEXP (XEXP (tmp, 0), 0);
10542 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10543 model = SYMBOL_REF_TLS_MODEL (tmp);
10544 gcc_assert (model != 0);
10546 tmp = rs6000_legitimize_tls_address (tmp, model);
10547 if (addend)
10549 tmp = gen_rtx_PLUS (mode, tmp, addend);
10550 tmp = force_operand (tmp, operands[0]);
10552 operands[1] = tmp;
10555 /* 128-bit constant floating-point values on Darwin should really be loaded
10556 as two parts. However, this premature splitting is a problem when DFmode
10557 values can go into Altivec registers. */
10558 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10559 && GET_CODE (operands[1]) == CONST_DOUBLE)
10561 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10562 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10563 DFmode);
10564 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10565 GET_MODE_SIZE (DFmode)),
10566 simplify_gen_subreg (DFmode, operands[1], mode,
10567 GET_MODE_SIZE (DFmode)),
10568 DFmode);
10569 return;
10572 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10573 p1:SD) if p1 is not of floating-point class and p0 is spilled, as
10574 we have no analogous movsd_store for this case. */
10575 if (lra_in_progress && mode == DDmode
10576 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10577 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10578 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10579 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10581 enum reg_class cl;
10582 int regno = REGNO (SUBREG_REG (operands[1]));
10584 if (regno >= FIRST_PSEUDO_REGISTER)
10586 cl = reg_preferred_class (regno);
10587 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10589 if (regno >= 0 && ! FP_REGNO_P (regno))
10591 mode = SDmode;
10592 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10593 operands[1] = SUBREG_REG (operands[1]);
10596 if (lra_in_progress
10597 && mode == SDmode
10598 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10599 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10600 && (REG_P (operands[1])
10601 || (GET_CODE (operands[1]) == SUBREG
10602 && REG_P (SUBREG_REG (operands[1])))))
10604 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10605 ? SUBREG_REG (operands[1]) : operands[1]);
10606 enum reg_class cl;
10608 if (regno >= FIRST_PSEUDO_REGISTER)
10610 cl = reg_preferred_class (regno);
10611 gcc_assert (cl != NO_REGS);
10612 regno = ira_class_hard_regs[cl][0];
10614 if (FP_REGNO_P (regno))
10616 if (GET_MODE (operands[0]) != DDmode)
10617 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10618 emit_insn (gen_movsd_store (operands[0], operands[1]));
10620 else if (INT_REGNO_P (regno))
10621 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10622 else
10623 gcc_unreachable();
10624 return;
10626 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10627 p1:DD)) if p0 is not of floating-point class and p1 is spilled, as
10628 we have no analogous movsd_load for this case. */
10629 if (lra_in_progress && mode == DDmode
10630 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10631 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10632 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10633 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10635 enum reg_class cl;
10636 int regno = REGNO (SUBREG_REG (operands[0]));
10638 if (regno >= FIRST_PSEUDO_REGISTER)
10640 cl = reg_preferred_class (regno);
10641 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10643 if (regno >= 0 && ! FP_REGNO_P (regno))
10645 mode = SDmode;
10646 operands[0] = SUBREG_REG (operands[0]);
10647 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10650 if (lra_in_progress
10651 && mode == SDmode
10652 && (REG_P (operands[0])
10653 || (GET_CODE (operands[0]) == SUBREG
10654 && REG_P (SUBREG_REG (operands[0]))))
10655 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10656 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10658 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10659 ? SUBREG_REG (operands[0]) : operands[0]);
10660 enum reg_class cl;
10662 if (regno >= FIRST_PSEUDO_REGISTER)
10664 cl = reg_preferred_class (regno);
10665 gcc_assert (cl != NO_REGS);
10666 regno = ira_class_hard_regs[cl][0];
10668 if (FP_REGNO_P (regno))
10670 if (GET_MODE (operands[1]) != DDmode)
10671 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10672 emit_insn (gen_movsd_load (operands[0], operands[1]));
10674 else if (INT_REGNO_P (regno))
10675 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10676 else
10677 gcc_unreachable();
10678 return;
10681 /* FIXME: In the long term, this switch statement should go away
10682 and be replaced by a sequence of tests based on things like
10683 mode == Pmode. */
10684 switch (mode)
10686 case E_HImode:
10687 case E_QImode:
10688 if (CONSTANT_P (operands[1])
10689 && GET_CODE (operands[1]) != CONST_INT)
10690 operands[1] = force_const_mem (mode, operands[1]);
10691 break;
10693 case E_TFmode:
10694 case E_TDmode:
10695 case E_IFmode:
10696 case E_KFmode:
10697 if (FLOAT128_2REG_P (mode))
10698 rs6000_eliminate_indexed_memrefs (operands);
10699 /* fall through */
10701 case E_DFmode:
10702 case E_DDmode:
10703 case E_SFmode:
10704 case E_SDmode:
10705 if (CONSTANT_P (operands[1])
10706 && ! easy_fp_constant (operands[1], mode))
10707 operands[1] = force_const_mem (mode, operands[1]);
10708 break;
10710 case E_V16QImode:
10711 case E_V8HImode:
10712 case E_V4SFmode:
10713 case E_V4SImode:
10714 case E_V2SFmode:
10715 case E_V2SImode:
10716 case E_V2DFmode:
10717 case E_V2DImode:
10718 case E_V1TImode:
10719 if (CONSTANT_P (operands[1])
10720 && !easy_vector_constant (operands[1], mode))
10721 operands[1] = force_const_mem (mode, operands[1]);
10722 break;
10724 case E_SImode:
10725 case E_DImode:
10726 /* Use the default pattern for the address of ELF small data. */
10727 if (TARGET_ELF
10728 && mode == Pmode
10729 && DEFAULT_ABI == ABI_V4
10730 && (GET_CODE (operands[1]) == SYMBOL_REF
10731 || GET_CODE (operands[1]) == CONST)
10732 && small_data_operand (operands[1], mode))
10734 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10735 return;
10738 if (DEFAULT_ABI == ABI_V4
10739 && mode == Pmode && mode == SImode
10740 && flag_pic == 1 && got_operand (operands[1], mode))
10742 emit_insn (gen_movsi_got (operands[0], operands[1]));
10743 return;
10746 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10747 && TARGET_NO_TOC
10748 && ! flag_pic
10749 && mode == Pmode
10750 && CONSTANT_P (operands[1])
10751 && GET_CODE (operands[1]) != HIGH
10752 && GET_CODE (operands[1]) != CONST_INT)
10754 rtx target = (!can_create_pseudo_p ()
10755 ? operands[0]
10756 : gen_reg_rtx (mode));
10758 /* If this is a function address on -mcall-aixdesc,
10759 convert it to the address of the descriptor. */
10760 if (DEFAULT_ABI == ABI_AIX
10761 && GET_CODE (operands[1]) == SYMBOL_REF
10762 && XSTR (operands[1], 0)[0] == '.')
10764 const char *name = XSTR (operands[1], 0);
10765 rtx new_ref;
10766 while (*name == '.')
10767 name++;
10768 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10769 CONSTANT_POOL_ADDRESS_P (new_ref)
10770 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10771 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10772 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10773 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10774 operands[1] = new_ref;
10777 if (DEFAULT_ABI == ABI_DARWIN)
10779 #if TARGET_MACHO
10780 if (MACHO_DYNAMIC_NO_PIC_P)
10782 /* Take care of any required data indirection. */
10783 operands[1] = rs6000_machopic_legitimize_pic_address (
10784 operands[1], mode, operands[0]);
10785 if (operands[0] != operands[1])
10786 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10787 return;
10789 #endif
10790 emit_insn (gen_macho_high (target, operands[1]));
10791 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10792 return;
10795 emit_insn (gen_elf_high (target, operands[1]));
10796 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10797 return;
10800 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10801 and we have put it in the TOC, we just need to make a TOC-relative
10802 reference to it. */
10803 if (TARGET_TOC
10804 && GET_CODE (operands[1]) == SYMBOL_REF
10805 && use_toc_relative_ref (operands[1], mode))
10806 operands[1] = create_TOC_reference (operands[1], operands[0]);
10807 else if (mode == Pmode
10808 && CONSTANT_P (operands[1])
10809 && GET_CODE (operands[1]) != HIGH
10810 && ((GET_CODE (operands[1]) != CONST_INT
10811 && ! easy_fp_constant (operands[1], mode))
10812 || (GET_CODE (operands[1]) == CONST_INT
10813 && (num_insns_constant (operands[1], mode)
10814 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10815 || (GET_CODE (operands[0]) == REG
10816 && FP_REGNO_P (REGNO (operands[0]))))
10817 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10818 && (TARGET_CMODEL == CMODEL_SMALL
10819 || can_create_pseudo_p ()
10820 || (REG_P (operands[0])
10821 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10824 #if TARGET_MACHO
10825 /* Darwin uses a special PIC legitimizer. */
10826 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10828 operands[1] =
10829 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10830 operands[0]);
10831 if (operands[0] != operands[1])
10832 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10833 return;
10835 #endif
10837 /* If we are to limit the number of things we put in the TOC and
10838 this is a symbol plus a constant we can add in one insn,
10839 just put the symbol in the TOC and add the constant. */
10840 if (GET_CODE (operands[1]) == CONST
10841 && TARGET_NO_SUM_IN_TOC
10842 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10843 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10844 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10845 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10846 && ! side_effects_p (operands[0]))
10848 rtx sym =
10849 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10850 rtx other = XEXP (XEXP (operands[1], 0), 1);
10852 sym = force_reg (mode, sym);
10853 emit_insn (gen_add3_insn (operands[0], sym, other));
10854 return;
10857 operands[1] = force_const_mem (mode, operands[1]);
10859 if (TARGET_TOC
10860 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10861 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10863 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10864 operands[0]);
10865 operands[1] = gen_const_mem (mode, tocref);
10866 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10869 break;
10871 case E_TImode:
10872 if (!VECTOR_MEM_VSX_P (TImode))
10873 rs6000_eliminate_indexed_memrefs (operands);
10874 break;
10876 case E_PTImode:
10877 rs6000_eliminate_indexed_memrefs (operands);
10878 break;
10880 default:
10881 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10884 /* Above, we may have called force_const_mem which may have returned
10885 an invalid address. If we can, fix this up; otherwise, reload will
10886 have to deal with it. */
10887 if (GET_CODE (operands[1]) == MEM)
10888 operands[1] = validize_mem (operands[1]);
10890 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10893 /* Nonzero if we can use a floating-point register to pass this arg. */
10894 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10895 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10896 && (CUM)->fregno <= FP_ARG_MAX_REG \
10897 && TARGET_HARD_FLOAT)
10899 /* Nonzero if we can use an AltiVec register to pass this arg. */
10900 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10901 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10902 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10903 && TARGET_ALTIVEC_ABI \
10904 && (NAMED))
10906 /* Walk down the type tree of TYPE counting consecutive base elements.
10907 If *MODEP is VOIDmode, then set it to the first valid floating point
10908 or vector type. If a non-floating point or vector type is found, or
10909 if a floating point or vector type that doesn't match a non-VOIDmode
10910 *MODEP is found, then return -1, otherwise return the count in the
10911 sub-tree. */
10913 static int
10914 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10916 machine_mode mode;
10917 HOST_WIDE_INT size;
10919 switch (TREE_CODE (type))
10921 case REAL_TYPE:
10922 mode = TYPE_MODE (type);
10923 if (!SCALAR_FLOAT_MODE_P (mode))
10924 return -1;
10926 if (*modep == VOIDmode)
10927 *modep = mode;
10929 if (*modep == mode)
10930 return 1;
10932 break;
10934 case COMPLEX_TYPE:
10935 mode = TYPE_MODE (TREE_TYPE (type));
10936 if (!SCALAR_FLOAT_MODE_P (mode))
10937 return -1;
10939 if (*modep == VOIDmode)
10940 *modep = mode;
10942 if (*modep == mode)
10943 return 2;
10945 break;
10947 case VECTOR_TYPE:
10948 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10949 return -1;
10951 /* Use V4SImode as representative of all 128-bit vector types. */
10952 size = int_size_in_bytes (type);
10953 switch (size)
10955 case 16:
10956 mode = V4SImode;
10957 break;
10958 default:
10959 return -1;
10962 if (*modep == VOIDmode)
10963 *modep = mode;
10965 /* Vector modes are considered to be opaque: two vectors are
10966 equivalent for the purposes of being homogeneous aggregates
10967 if they are the same size. */
10968 if (*modep == mode)
10969 return 1;
10971 break;
10973 case ARRAY_TYPE:
10975 int count;
10976 tree index = TYPE_DOMAIN (type);
10978 /* Can't handle incomplete types or sizes that are not
10979 fixed. */
10980 if (!COMPLETE_TYPE_P (type)
10981 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10982 return -1;
10984 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10985 if (count == -1
10986 || !index
10987 || !TYPE_MAX_VALUE (index)
10988 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10989 || !TYPE_MIN_VALUE (index)
10990 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10991 || count < 0)
10992 return -1;
10994 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10995 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10997 /* There must be no padding. */
10998 if (wi::to_wide (TYPE_SIZE (type))
10999 != count * GET_MODE_BITSIZE (*modep))
11000 return -1;
11002 return count;
11005 case RECORD_TYPE:
11007 int count = 0;
11008 int sub_count;
11009 tree field;
11011 /* Can't handle incomplete types or sizes that are not
11012 fixed. */
11013 if (!COMPLETE_TYPE_P (type)
11014 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11015 return -1;
11017 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11019 if (TREE_CODE (field) != FIELD_DECL)
11020 continue;
11022 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11023 if (sub_count < 0)
11024 return -1;
11025 count += sub_count;
11028 /* There must be no padding. */
11029 if (wi::to_wide (TYPE_SIZE (type))
11030 != count * GET_MODE_BITSIZE (*modep))
11031 return -1;
11033 return count;
11036 case UNION_TYPE:
11037 case QUAL_UNION_TYPE:
11039 /* These aren't very interesting except in a degenerate case. */
11040 int count = 0;
11041 int sub_count;
11042 tree field;
11044 /* Can't handle incomplete types or sizes that are not
11045 fixed. */
11046 if (!COMPLETE_TYPE_P (type)
11047 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11048 return -1;
11050 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11052 if (TREE_CODE (field) != FIELD_DECL)
11053 continue;
11055 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11056 if (sub_count < 0)
11057 return -1;
11058 count = count > sub_count ? count : sub_count;
11061 /* There must be no padding. */
11062 if (wi::to_wide (TYPE_SIZE (type))
11063 != count * GET_MODE_BITSIZE (*modep))
11064 return -1;
11066 return count;
11069 default:
11070 break;
11073 return -1;
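/* Worked example (illustrative, not from the original source): for
   struct pt { double x, y; };
   struct box { struct pt lo, hi; };
   the walk above sees four REAL_TYPE leaves of DFmode, so it returns 4
   with *modep == DFmode, and the padding check 256 == 4 * 64 passes.
   Adding an int field, or mixing an SFmode field in once *modep is
   DFmode, makes a leaf return -1 and the whole type is rejected. */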
11076 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
11077 float or vector aggregate that shall be passed in FP/vector registers
11078 according to the ELFv2 ABI, return the homogeneous element mode in
11079 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
11081 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
11083 static bool
11084 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
11085 machine_mode *elt_mode,
11086 int *n_elts)
11088 /* Note that we do not accept complex types at the top level as
11089 homogeneous aggregates; these types are handled via the
11090 targetm.calls.split_complex_arg mechanism. Complex types
11091 can be elements of homogeneous aggregates, however. */
11092 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
11094 machine_mode field_mode = VOIDmode;
11095 int field_count = rs6000_aggregate_candidate (type, &field_mode);
11097 if (field_count > 0)
11099 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
11100 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
11102 /* The ELFv2 ABI allows homogeneous aggregates to occupy
11103 up to AGGR_ARG_NUM_REG registers. */
11104 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
11106 if (elt_mode)
11107 *elt_mode = field_mode;
11108 if (n_elts)
11109 *n_elts = field_count;
11110 return true;
11115 if (elt_mode)
11116 *elt_mode = mode;
11117 if (n_elts)
11118 *n_elts = 1;
11119 return false;
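/* Minimal sketch of the register budget applied above (an illustration,
   not GCC internals; it takes AGGR_ARG_NUM_REG to be 8, its usual ELFv2
   value). */
#if 0
static int
elfv2_homogeneous_fits_p (int field_count, int elt_bytes, int elt_is_float)
{
  /* Scalar float elements may need more than one FPR (e.g. IBM extended
     uses two); vector elements always use a single VR. */
  int n_regs = elt_is_float ? (elt_bytes + 7) / 8 : 1;
  return field_count * n_regs <= 8; /* AGGR_ARG_NUM_REG */
}
/* A struct of four doubles fits (4 * 1 <= 8); a struct of five IBM
   long doubles does not (5 * 2 > 8). */
#endif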
11122 /* Return a nonzero value to say to return the function value in
11123 memory, just as large structures are always returned. TYPE will be
11124 the data type of the value, and FNTYPE will be the type of the
11125 function doing the returning, or @code{NULL} for libcalls.
11127 The AIX ABI for the RS/6000 specifies that all structures are
11128 returned in memory. The Darwin ABI does the same.
11130 For the Darwin 64 Bit ABI, a function result can be returned in
11131 registers or in memory, depending on the size of the return data
11132 type. If it is returned in registers, the value occupies the same
11133 registers as it would if it were the first and only function
11134 argument. Otherwise, the function places its result in memory at
11135 the location pointed to by GPR3.
11137 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
11138 but a draft put them in memory, and GCC used to implement the draft
11139 instead of the final standard. Therefore, aix_struct_return
11140 controls this instead of DEFAULT_ABI; V.4 targets needing backward
11141 compatibility can change DRAFT_V4_STRUCT_RET to override the
11142 default, and -m switches get the final word. See
11143 rs6000_option_override_internal for more details.
11145 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
11146 long double support is enabled. These values are returned in memory.
11148 int_size_in_bytes returns -1 for variable size objects, which go in
11149 memory always. The cast to unsigned makes -1 > 8. */
11151 static bool
11152 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
11154 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
11155 if (TARGET_MACHO
11156 && rs6000_darwin64_abi
11157 && TREE_CODE (type) == RECORD_TYPE
11158 && int_size_in_bytes (type) > 0)
11160 CUMULATIVE_ARGS valcum;
11161 rtx valret;
11163 valcum.words = 0;
11164 valcum.fregno = FP_ARG_MIN_REG;
11165 valcum.vregno = ALTIVEC_ARG_MIN_REG;
11166 /* Do a trial code generation as if this were going to be passed
11167 as an argument; if any part goes in memory, we return NULL. */
11168 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
11169 if (valret)
11170 return false;
11171 /* Otherwise fall through to more conventional ABI rules. */
11174 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
11175 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
11176 NULL, NULL))
11177 return false;
11179 /* The ELFv2 ABI returns aggregates up to 16 bytes in registers. */
11180 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
11181 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
11182 return false;
11184 if (AGGREGATE_TYPE_P (type)
11185 && (aix_struct_return
11186 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
11187 return true;
11189 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11190 modes only exist for GCC vector types if -maltivec. */
11191 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
11192 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11193 return false;
11195 /* Return synthetic vectors in memory. */
11196 if (TREE_CODE (type) == VECTOR_TYPE
11197 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11199 static bool warned_for_return_big_vectors = false;
11200 if (!warned_for_return_big_vectors)
11202 warning (OPT_Wpsabi, "GCC vector returned by reference: "
11203 "non-standard ABI extension with no compatibility "
11204 "guarantee");
11205 warned_for_return_big_vectors = true;
11207 return true;
11210 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11211 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11212 return true;
11214 return false;
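/* Examples (illustrative): under ELFv2, struct { double a, b; } is a
   homogeneous aggregate and struct { long x, y; } is 16 bytes, so both
   are returned in registers; with aix_struct_return set, any aggregate
   goes to memory; a variable-size object has int_size_in_bytes == -1,
   which the unsigned cast makes larger than 8, so it is always
   returned in memory. */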
11217 /* Specify whether values returned in registers should be at the most
11218 significant end of a register. We want aggregates returned by
11219 value to match the way aggregates are passed to functions. */
11221 static bool
11222 rs6000_return_in_msb (const_tree valtype)
11224 return (DEFAULT_ABI == ABI_ELFv2
11225 && BYTES_BIG_ENDIAN
11226 && AGGREGATE_TYPE_P (valtype)
11227 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
11228 == PAD_UPWARD));
11231 #ifdef HAVE_AS_GNU_ATTRIBUTE
11232 /* Return TRUE if a call to function FNDECL may be one that
11233 potentially affects the function calling ABI of the object file. */
11235 static bool
11236 call_ABI_of_interest (tree fndecl)
11238 if (rs6000_gnu_attr && symtab->state == EXPANSION)
11240 struct cgraph_node *c_node;
11242 /* Libcalls are always interesting. */
11243 if (fndecl == NULL_TREE)
11244 return true;
11246 /* Any call to an external function is interesting. */
11247 if (DECL_EXTERNAL (fndecl))
11248 return true;
11250 /* Interesting functions that we are emitting in this object file. */
11251 c_node = cgraph_node::get (fndecl);
11252 c_node = c_node->ultimate_alias_target ();
11253 return !c_node->only_called_directly_p ();
11255 return false;
11257 #endif
11259 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11260 for a call to a function whose data type is FNTYPE.
11261 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
11263 For incoming args we set the number of arguments in the prototype large
11264 so we never return a PARALLEL. */
11266 void
11267 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
11268 rtx libname ATTRIBUTE_UNUSED, int incoming,
11269 int libcall, int n_named_args,
11270 tree fndecl ATTRIBUTE_UNUSED,
11271 machine_mode return_mode ATTRIBUTE_UNUSED)
11273 static CUMULATIVE_ARGS zero_cumulative;
11275 *cum = zero_cumulative;
11276 cum->words = 0;
11277 cum->fregno = FP_ARG_MIN_REG;
11278 cum->vregno = ALTIVEC_ARG_MIN_REG;
11279 cum->prototype = (fntype && prototype_p (fntype));
11280 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
11281 ? CALL_LIBCALL : CALL_NORMAL);
11282 cum->sysv_gregno = GP_ARG_MIN_REG;
11283 cum->stdarg = stdarg_p (fntype);
11284 cum->libcall = libcall;
11286 cum->nargs_prototype = 0;
11287 if (incoming || cum->prototype)
11288 cum->nargs_prototype = n_named_args;
11290 /* Check for a longcall attribute. */
11291 if ((!fntype && rs6000_default_long_calls)
11292 || (fntype
11293 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
11294 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
11295 cum->call_cookie |= CALL_LONG;
11297 if (TARGET_DEBUG_ARG)
11299 fprintf (stderr, "\ninit_cumulative_args:");
11300 if (fntype)
11302 tree ret_type = TREE_TYPE (fntype);
11303 fprintf (stderr, " ret code = %s,",
11304 get_tree_code_name (TREE_CODE (ret_type)));
11307 if (cum->call_cookie & CALL_LONG)
11308 fprintf (stderr, " longcall,");
11310 fprintf (stderr, " proto = %d, nargs = %d\n",
11311 cum->prototype, cum->nargs_prototype);
11314 #ifdef HAVE_AS_GNU_ATTRIBUTE
11315 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
11317 cum->escapes = call_ABI_of_interest (fndecl);
11318 if (cum->escapes)
11320 tree return_type;
11322 if (fntype)
11324 return_type = TREE_TYPE (fntype);
11325 return_mode = TYPE_MODE (return_type);
11327 else
11328 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
11330 if (return_type != NULL)
11332 if (TREE_CODE (return_type) == RECORD_TYPE
11333 && TYPE_TRANSPARENT_AGGR (return_type))
11335 return_type = TREE_TYPE (first_field (return_type));
11336 return_mode = TYPE_MODE (return_type);
11338 if (AGGREGATE_TYPE_P (return_type)
11339 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
11340 <= 8))
11341 rs6000_returns_struct = true;
11343 if (SCALAR_FLOAT_MODE_P (return_mode))
11345 rs6000_passes_float = true;
11346 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11347 && (FLOAT128_IBM_P (return_mode)
11348 || FLOAT128_IEEE_P (return_mode)
11349 || (return_type != NULL
11350 && (TYPE_MAIN_VARIANT (return_type)
11351 == long_double_type_node))))
11352 rs6000_passes_long_double = true;
11354 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
11355 || PAIRED_VECTOR_MODE (return_mode))
11356 rs6000_passes_vector = true;
11359 #endif
11361 if (fntype
11362 && !TARGET_ALTIVEC
11363 && TARGET_ALTIVEC_ABI
11364 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
11366 error ("cannot return value in vector register because"
11367 " altivec instructions are disabled, use %qs"
11368 " to enable them", "-maltivec");
11372 /* The mode the ABI uses for a word. This is not the same as word_mode
11373 for -m32 -mpowerpc64. This is used to implement various target hooks. */
11375 static scalar_int_mode
11376 rs6000_abi_word_mode (void)
11378 return TARGET_32BIT ? SImode : DImode;
11381 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
11382 static char *
11383 rs6000_offload_options (void)
11385 if (TARGET_64BIT)
11386 return xstrdup ("-foffload-abi=lp64");
11387 else
11388 return xstrdup ("-foffload-abi=ilp32");
11391 /* On rs6000, function arguments are promoted, as are function return
11392 values. */
11394 static machine_mode
11395 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
11396 machine_mode mode,
11397 int *punsignedp ATTRIBUTE_UNUSED,
11398 const_tree, int)
11400 PROMOTE_MODE (mode, *punsignedp, type);
11402 return mode;
11405 /* Return true if TYPE must be passed on the stack and not in registers. */
11407 static bool
11408 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
11410 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
11411 return must_pass_in_stack_var_size (mode, type);
11412 else
11413 return must_pass_in_stack_var_size_or_pad (mode, type);
11416 static inline bool
11417 is_complex_IBM_long_double (machine_mode mode)
11419 return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
11422 /* Whether ABI_V4 passes MODE args to a function in floating point
11423 registers. */
11425 static bool
11426 abi_v4_pass_in_fpr (machine_mode mode)
11428 if (!TARGET_HARD_FLOAT)
11429 return false;
11430 if (TARGET_SINGLE_FLOAT && mode == SFmode)
11431 return true;
11432 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
11433 return true;
11434 /* ABI_V4 passes complex IBM long double in 8 gprs.
11435 Stupid, but we can't change the ABI now. */
11436 if (is_complex_IBM_long_double (mode))
11437 return false;
11438 if (FLOAT128_2REG_P (mode))
11439 return true;
11440 if (DECIMAL_FLOAT_MODE_P (mode))
11441 return true;
11442 return false;
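/* Examples (illustrative): with hard float, an SFmode or DFmode arg
   goes in the V.4 FPR range f1..f8, as do IBM extended
   (FLOAT128_2REG_P) and decimal float values, while complex IBM long
   double deliberately falls through to the 8-GPR path mentioned
   above. */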
11445 /* Implement TARGET_FUNCTION_ARG_PADDING.
11447 For the AIX ABI structs are always stored left shifted in their
11448 argument slot. */
11450 static pad_direction
11451 rs6000_function_arg_padding (machine_mode mode, const_tree type)
11453 #ifndef AGGREGATE_PADDING_FIXED
11454 #define AGGREGATE_PADDING_FIXED 0
11455 #endif
11456 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11457 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11458 #endif
11460 if (!AGGREGATE_PADDING_FIXED)
11462 /* GCC used to pass structures of the same size as integer types as
11463 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
11464 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11465 passed padded downward, except that -mstrict-align further
11466 muddied the water in that multi-component structures of 2 and 4
11467 bytes in size were passed padded upward.
11469 The following arranges for best compatibility with previous
11470 versions of gcc, but removes the -mstrict-align dependency. */
11471 if (BYTES_BIG_ENDIAN)
11473 HOST_WIDE_INT size = 0;
11475 if (mode == BLKmode)
11477 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11478 size = int_size_in_bytes (type);
11480 else
11481 size = GET_MODE_SIZE (mode);
11483 if (size == 1 || size == 2 || size == 4)
11484 return PAD_DOWNWARD;
11486 return PAD_UPWARD;
11489 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11491 if (type != 0 && AGGREGATE_TYPE_P (type))
11492 return PAD_UPWARD;
11495 /* Fall back to the default. */
11496 return default_function_arg_padding (mode, type);
11499 /* If defined, a C expression that gives the alignment boundary, in bits,
11500 of an argument with the specified mode and type. If it is not defined,
11501 PARM_BOUNDARY is used for all arguments.
11503 V.4 wants long longs and doubles to be double word aligned. Just
11504 testing the mode size is a boneheaded way to do this as it means
11505 that other types such as complex int are also double word aligned.
11506 However, we're stuck with this because changing the ABI might break
11507 existing library interfaces.
11509 Quadword align Altivec/VSX vectors.
11510 Quadword align large synthetic vector types. */
11512 static unsigned int
11513 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11515 machine_mode elt_mode;
11516 int n_elts;
11518 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11520 if (DEFAULT_ABI == ABI_V4
11521 && (GET_MODE_SIZE (mode) == 8
11522 || (TARGET_HARD_FLOAT
11523 && !is_complex_IBM_long_double (mode)
11524 && FLOAT128_2REG_P (mode))))
11525 return 64;
11526 else if (FLOAT128_VECTOR_P (mode))
11527 return 128;
11528 else if (PAIRED_VECTOR_MODE (mode)
11529 || (type && TREE_CODE (type) == VECTOR_TYPE
11530 && int_size_in_bytes (type) >= 8
11531 && int_size_in_bytes (type) < 16))
11532 return 64;
11533 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11534 || (type && TREE_CODE (type) == VECTOR_TYPE
11535 && int_size_in_bytes (type) >= 16))
11536 return 128;
11538 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11539 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11540 -mcompat-align-parm is used. */
11541 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11542 || DEFAULT_ABI == ABI_ELFv2)
11543 && type && TYPE_ALIGN (type) > 64)
11545 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11546 or homogeneous float/vector aggregates here. We already handled
11547 vector aggregates above, but still need to check for float here. */
11548 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11549 && !SCALAR_FLOAT_MODE_P (elt_mode));
11551 /* We used to check for BLKmode instead of the above aggregate type
11552 check. Warn when this results in any difference to the ABI. */
11553 if (aggregate_p != (mode == BLKmode))
11555 static bool warned;
11556 if (!warned && warn_psabi)
11558 warned = true;
11559 inform (input_location,
11560 "the ABI of passing aggregates with %d-byte alignment"
11561 " has changed in GCC 5",
11562 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11566 if (aggregate_p)
11567 return 128;
11570 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11571 implement the "aggregate type" check as a BLKmode check here; this
11572 means certain aggregate types are in fact not aligned. */
11573 if (TARGET_MACHO && rs6000_darwin64_abi
11574 && mode == BLKmode
11575 && type && TYPE_ALIGN (type) > 64)
11576 return 128;
11578 return PARM_BOUNDARY;
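/* Examples (illustrative): under ABI_V4 a double or long long answers
   64; IEEE 128-bit floats and Altivec/VSX vectors answer 128; an ELFv2
   aggregate with TYPE_ALIGN > 64 answers 128 unless it is a
   homogeneous float aggregate; everything else gets PARM_BOUNDARY. */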
11581 /* The offset in words to the start of the parameter save area. */
11583 static unsigned int
11584 rs6000_parm_offset (void)
11586 return (DEFAULT_ABI == ABI_V4 ? 2
11587 : DEFAULT_ABI == ABI_ELFv2 ? 4
11588 : 6);
11591 /* For a function parm of MODE and TYPE, return the starting word in
11592 the parameter area. NWORDS of the parameter area are already used. */
11594 static unsigned int
11595 rs6000_parm_start (machine_mode mode, const_tree type,
11596 unsigned int nwords)
11598 unsigned int align;
11600 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11601 return nwords + (-(rs6000_parm_offset () + nwords) & align);
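/* Worked example (illustrative): for a quadword-aligned arg in 64-bit
   ELFv2 mode, rs6000_function_arg_boundary gives 128 and PARM_BOUNDARY
   is 64, so align == 1; with rs6000_parm_offset () == 4 and
   nwords == 3, -(4 + 3) & 1 == 1 and the arg starts at word 4, i.e. on
   an even doubleword of the save area. */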
11604 /* Compute the size (in words) of a function argument. */
11606 static unsigned long
11607 rs6000_arg_size (machine_mode mode, const_tree type)
11609 unsigned long size;
11611 if (mode != BLKmode)
11612 size = GET_MODE_SIZE (mode);
11613 else
11614 size = int_size_in_bytes (type);
11616 if (TARGET_32BIT)
11617 return (size + 3) >> 2;
11618 else
11619 return (size + 7) >> 3;
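/* E.g. (illustrative) a 10-byte BLKmode struct occupies
   (10 + 3) >> 2 == 3 words in 32-bit mode and (10 + 7) >> 3 == 2 words
   in 64-bit mode. */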
11622 /* Use this to flush pending int fields. */
11624 static void
11625 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11626 HOST_WIDE_INT bitpos, int final)
11628 unsigned int startbit, endbit;
11629 int intregs, intoffset;
11631 /* Handle the situations where a float is taking up the first half
11632 of the GPR, and the other half is empty (typically due to
11633 alignment restrictions). We can detect this by an 8-byte-aligned
11634 int field, or by seeing that this is the final flush for this
11635 argument. Count the word and continue on. */
11636 if (cum->floats_in_gpr == 1
11637 && (cum->intoffset % 64 == 0
11638 || (cum->intoffset == -1 && final)))
11640 cum->words++;
11641 cum->floats_in_gpr = 0;
11644 if (cum->intoffset == -1)
11645 return;
11647 intoffset = cum->intoffset;
11648 cum->intoffset = -1;
11649 cum->floats_in_gpr = 0;
11651 if (intoffset % BITS_PER_WORD != 0)
11653 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11654 if (!int_mode_for_size (bits, 0).exists ())
11656 /* We couldn't find an appropriate mode, which happens,
11657 e.g., in packed structs when there are 3 bytes to load.
11658 Move intoffset back to the beginning of the word in this
11659 case. */
11660 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11664 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11665 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11666 intregs = (endbit - startbit) / BITS_PER_WORD;
11667 cum->words += intregs;
11668 /* words should be unsigned. */
11669 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11671 int pad = (endbit/BITS_PER_WORD) - cum->words;
11672 cum->words += pad;
11676 /* The darwin64 ABI calls for us to recurse down through structs,
11677 looking for elements passed in registers. Unfortunately, we have
11678 to track int register count here also because of misalignments
11679 in powerpc alignment mode. */
11681 static void
11682 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11683 const_tree type,
11684 HOST_WIDE_INT startbitpos)
11686 tree f;
11688 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11689 if (TREE_CODE (f) == FIELD_DECL)
11691 HOST_WIDE_INT bitpos = startbitpos;
11692 tree ftype = TREE_TYPE (f);
11693 machine_mode mode;
11694 if (ftype == error_mark_node)
11695 continue;
11696 mode = TYPE_MODE (ftype);
11698 if (DECL_SIZE (f) != 0
11699 && tree_fits_uhwi_p (bit_position (f)))
11700 bitpos += int_bit_position (f);
11702 /* ??? FIXME: else assume zero offset. */
11704 if (TREE_CODE (ftype) == RECORD_TYPE)
11705 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11706 else if (USE_FP_FOR_ARG_P (cum, mode))
11708 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11709 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11710 cum->fregno += n_fpregs;
11711 /* Single-precision floats present a special problem for
11712 us, because they are smaller than an 8-byte GPR, and so
11713 the structure-packing rules combined with the standard
11714 varargs behavior mean that we want to pack float/float
11715 and float/int combinations into a single register's
11716 space. This is complicated by the arg advance flushing,
11717 which works on arbitrarily large groups of int-type
11718 fields. */
11719 if (mode == SFmode)
11721 if (cum->floats_in_gpr == 1)
11723 /* Two floats in a word; count the word and reset
11724 the float count. */
11725 cum->words++;
11726 cum->floats_in_gpr = 0;
11728 else if (bitpos % 64 == 0)
11730 /* A float at the beginning of an 8-byte word;
11731 count it and put off adjusting cum->words until
11732 we see if an arg advance flush is going to do it
11733 for us. */
11734 cum->floats_in_gpr++;
11736 else
11738 /* The float is at the end of a word, preceded
11739 by integer fields, so the arg advance flush
11740 just above has already set cum->words and
11741 everything is taken care of. */
11744 else
11745 cum->words += n_fpregs;
11747 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11749 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11750 cum->vregno++;
11751 cum->words += 2;
11753 else if (cum->intoffset == -1)
11754 cum->intoffset = bitpos;
11758 /* Check for an item that needs to be considered specially under the darwin 64
11759 bit ABI. These are record types where the mode is BLK or the structure is
11760 8 bytes in size. */
11761 static int
11762 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11764 return rs6000_darwin64_abi
11765 && ((mode == BLKmode
11766 && TREE_CODE (type) == RECORD_TYPE
11767 && int_size_in_bytes (type) > 0)
11768 || (type && TREE_CODE (type) == RECORD_TYPE
11769 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11772 /* Update the data in CUM to advance over an argument
11773 of mode MODE and data type TYPE.
11774 (TYPE is null for libcalls where that information may not be available.)
11776 Note that for args passed by reference, function_arg will be called
11777 with MODE and TYPE set to that of the pointer to the arg, not the arg
11778 itself. */
11780 static void
11781 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11782 const_tree type, bool named, int depth)
11784 machine_mode elt_mode;
11785 int n_elts;
11787 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11789 /* Only tick off an argument if we're not recursing. */
11790 if (depth == 0)
11791 cum->nargs_prototype--;
11793 #ifdef HAVE_AS_GNU_ATTRIBUTE
11794 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11795 && cum->escapes)
11797 if (SCALAR_FLOAT_MODE_P (mode))
11799 rs6000_passes_float = true;
11800 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11801 && (FLOAT128_IBM_P (mode)
11802 || FLOAT128_IEEE_P (mode)
11803 || (type != NULL
11804 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11805 rs6000_passes_long_double = true;
11807 if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11808 || (PAIRED_VECTOR_MODE (mode)
11809 && !cum->stdarg
11810 && cum->sysv_gregno <= GP_ARG_MAX_REG))
11811 rs6000_passes_vector = true;
11813 #endif
11815 if (TARGET_ALTIVEC_ABI
11816 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11817 || (type && TREE_CODE (type) == VECTOR_TYPE
11818 && int_size_in_bytes (type) == 16)))
11820 bool stack = false;
11822 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11824 cum->vregno += n_elts;
11826 if (!TARGET_ALTIVEC)
11827 error ("cannot pass argument in vector register because"
11828 " altivec instructions are disabled, use %qs"
11829 " to enable them", "-maltivec");
11831 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11832 even if it is going to be passed in a vector register.
11833 Darwin does the same for variable-argument functions. */
11834 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11835 && TARGET_64BIT)
11836 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11837 stack = true;
11839 else
11840 stack = true;
11842 if (stack)
11844 int align;
11846 /* Vector parameters must be 16-byte aligned. In 32-bit
11847 mode this means we need to take into account the offset
11848 to the parameter save area. In 64-bit mode, they just
11849 have to start on an even word, since the parameter save
11850 area is 16-byte aligned. */
11851 if (TARGET_32BIT)
11852 align = -(rs6000_parm_offset () + cum->words) & 3;
11853 else
11854 align = cum->words & 1;
11855 cum->words += align + rs6000_arg_size (mode, type);
11857 if (TARGET_DEBUG_ARG)
11859 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11860 cum->words, align);
11861 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11862 cum->nargs_prototype, cum->prototype,
11863 GET_MODE_NAME (mode));
11867 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11869 int size = int_size_in_bytes (type);
11870 /* Variable sized types have size == -1 and are
11871 treated as if consisting entirely of ints.
11872 Pad to 16 byte boundary if needed. */
11873 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11874 && (cum->words % 2) != 0)
11875 cum->words++;
11876 /* For varargs, we can just go up by the size of the struct. */
11877 if (!named)
11878 cum->words += (size + 7) / 8;
11879 else
11881 /* It is tempting to say int register count just goes up by
11882 sizeof(type)/8, but this is wrong in a case such as
11883 { int; double; int; } [powerpc alignment]. We have to
11884 grovel through the fields for these too. */
11885 cum->intoffset = 0;
11886 cum->floats_in_gpr = 0;
11887 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11888 rs6000_darwin64_record_arg_advance_flush (cum,
11889 size * BITS_PER_UNIT, 1);
11891 if (TARGET_DEBUG_ARG)
11893 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11894 cum->words, TYPE_ALIGN (type), size);
11895 fprintf (stderr,
11896 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11897 cum->nargs_prototype, cum->prototype,
11898 GET_MODE_NAME (mode));
11901 else if (DEFAULT_ABI == ABI_V4)
11903 if (abi_v4_pass_in_fpr (mode))
11905 /* _Decimal128 must use an even/odd register pair. This assumes
11906 that the register number is odd when fregno is odd. */
11907 if (mode == TDmode && (cum->fregno % 2) == 1)
11908 cum->fregno++;
11910 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11911 <= FP_ARG_V4_MAX_REG)
11912 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11913 else
11915 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11916 if (mode == DFmode || FLOAT128_IBM_P (mode)
11917 || mode == DDmode || mode == TDmode)
11918 cum->words += cum->words & 1;
11919 cum->words += rs6000_arg_size (mode, type);
11922 else
11924 int n_words = rs6000_arg_size (mode, type);
11925 int gregno = cum->sysv_gregno;
11927 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11928 As does any other 2 word item such as complex int due to a
11929 historical mistake. */
11930 if (n_words == 2)
11931 gregno += (1 - gregno) & 1;
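/* E.g. (illustrative) gregno == 4 gives (1 - 4) & 1 == 1 and advances
   to r5, while an odd gregno is left unchanged, so pairs start only at
   r3, r5, r7 or r9. */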
11933 /* Multi-reg args are not split between registers and stack. */
11934 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11936 /* Long long is aligned on the stack. So are other 2 word
11937 items such as complex int due to a historical mistake. */
11938 if (n_words == 2)
11939 cum->words += cum->words & 1;
11940 cum->words += n_words;
11943 /* Note: we keep accumulating gregno even after we've started
11944 spilling to the stack, so that expand_builtin_saveregs can see
11945 that spilling has started. */
11946 cum->sysv_gregno = gregno + n_words;
11949 if (TARGET_DEBUG_ARG)
11951 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11952 cum->words, cum->fregno);
11953 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11954 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11955 fprintf (stderr, "mode = %4s, named = %d\n",
11956 GET_MODE_NAME (mode), named);
11959 else
11961 int n_words = rs6000_arg_size (mode, type);
11962 int start_words = cum->words;
11963 int align_words = rs6000_parm_start (mode, type, start_words);
11965 cum->words = align_words + n_words;
11967 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11969 /* _Decimal128 must be passed in an even/odd float register pair.
11970 This assumes that the register number is odd when fregno is
11971 odd. */
11972 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11973 cum->fregno++;
11974 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11977 if (TARGET_DEBUG_ARG)
11979 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11980 cum->words, cum->fregno);
11981 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11982 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11983 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11984 named, align_words - start_words, depth);
11989 static void
11990 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11991 const_tree type, bool named)
11993 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11997 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11998 structure between cum->intoffset and bitpos to integer registers. */
12000 static void
12001 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
12002 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
12004 machine_mode mode;
12005 unsigned int regno;
12006 unsigned int startbit, endbit;
12007 int this_regno, intregs, intoffset;
12008 rtx reg;
12010 if (cum->intoffset == -1)
12011 return;
12013 intoffset = cum->intoffset;
12014 cum->intoffset = -1;
12016 /* If this is the trailing part of a word, try to only load that
12017 much into the register. Otherwise load the whole register. Note
12018 that in the latter case we may pick up unwanted bits. It's not a
12019 problem at the moment, but we may wish to revisit it. */
12021 if (intoffset % BITS_PER_WORD != 0)
12023 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
12024 if (!int_mode_for_size (bits, 0).exists (&mode))
12026 /* We couldn't find an appropriate mode, which happens,
12027 e.g., in packed structs when there are 3 bytes to load.
12028 Move intoffset back to the beginning of the word in this
12029 case. */
12030 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
12031 mode = word_mode;
12034 else
12035 mode = word_mode;
12037 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
12038 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
12039 intregs = (endbit - startbit) / BITS_PER_WORD;
12040 this_regno = cum->words + intoffset / BITS_PER_WORD;
12042 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
12043 cum->use_stack = 1;
12045 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
12046 if (intregs <= 0)
12047 return;
12049 intoffset /= BITS_PER_UNIT;
12052 regno = GP_ARG_MIN_REG + this_regno;
12053 reg = gen_rtx_REG (mode, regno);
12054 rvec[(*k)++] =
12055 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
12057 this_regno += 1;
12058 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
12059 mode = word_mode;
12060 intregs -= 1;
12062 while (intregs > 0);
12065 /* Recursive workhorse for the following. */
12067 static void
12068 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
12069 HOST_WIDE_INT startbitpos, rtx rvec[],
12070 int *k)
12072 tree f;
12074 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
12075 if (TREE_CODE (f) == FIELD_DECL)
12077 HOST_WIDE_INT bitpos = startbitpos;
12078 tree ftype = TREE_TYPE (f);
12079 machine_mode mode;
12080 if (ftype == error_mark_node)
12081 continue;
12082 mode = TYPE_MODE (ftype);
12084 if (DECL_SIZE (f) != 0
12085 && tree_fits_uhwi_p (bit_position (f)))
12086 bitpos += int_bit_position (f);
12088 /* ??? FIXME: else assume zero offset. */
12090 if (TREE_CODE (ftype) == RECORD_TYPE)
12091 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
12092 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
12094 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
12095 #if 0
12096 switch (mode)
12098 case E_SCmode: mode = SFmode; break;
12099 case E_DCmode: mode = DFmode; break;
12100 case E_TCmode: mode = TFmode; break;
12101 default: break;
12103 #endif
12104 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12105 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
12107 gcc_assert (cum->fregno == FP_ARG_MAX_REG
12108 && (mode == TFmode || mode == TDmode));
12109 /* Long double or _Decimal128 split over regs and memory. */
12110 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
12111 cum->use_stack = 1;
12113 rvec[(*k)++]
12114 = gen_rtx_EXPR_LIST (VOIDmode,
12115 gen_rtx_REG (mode, cum->fregno++),
12116 GEN_INT (bitpos / BITS_PER_UNIT));
12117 if (FLOAT128_2REG_P (mode))
12118 cum->fregno++;
12120 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12122 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12123 rvec[(*k)++]
12124 = gen_rtx_EXPR_LIST (VOIDmode,
12125 gen_rtx_REG (mode, cum->vregno++),
12126 GEN_INT (bitpos / BITS_PER_UNIT));
12128 else if (cum->intoffset == -1)
12129 cum->intoffset = bitpos;
12133 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
12134 the register(s) to be used for each field and subfield of a struct
12135 being passed by value, along with the offset of where the
12136 register's value may be found in the block. FP fields go in FP
12137 registers, vector fields go in vector registers, and everything
12138 else goes in int registers, packed as in memory.
12140 This code is also used for function return values. RETVAL indicates
12141 whether this is the case.
12143 Much of this is taken from the SPARC V9 port, which has a similar
12144 calling convention. */
12146 static rtx
12147 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
12148 bool named, bool retval)
12150 rtx rvec[FIRST_PSEUDO_REGISTER];
12151 int k = 1, kbase = 1;
12152 HOST_WIDE_INT typesize = int_size_in_bytes (type);
12153 /* This is a copy; modifications are not visible to our caller. */
12154 CUMULATIVE_ARGS copy_cum = *orig_cum;
12155 CUMULATIVE_ARGS *cum = &copy_cum;
12157 /* Pad to 16 byte boundary if needed. */
12158 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12159 && (cum->words % 2) != 0)
12160 cum->words++;
12162 cum->intoffset = 0;
12163 cum->use_stack = 0;
12164 cum->named = named;
12166 /* Put entries into rvec[] for individual FP and vector fields, and
12167 for the chunks of memory that go in int regs. Note we start at
12168 element 1; 0 is reserved for an indication of using memory, and
12169 may or may not be filled in below. */
12170 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
12171 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
12173 /* If any part of the struct went on the stack put all of it there.
12174 This hack is because the generic code for
12175 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
12176 parts of the struct are not at the beginning. */
12177 if (cum->use_stack)
12179 if (retval)
12180 return NULL_RTX; /* doesn't go in registers at all */
12181 kbase = 0;
12182 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12184 if (k > 1 || cum->use_stack)
12185 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
12186 else
12187 return NULL_RTX;
12190 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
12192 static rtx
12193 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
12194 int align_words)
12196 int n_units;
12197 int i, k;
12198 rtx rvec[GP_ARG_NUM_REG + 1];
12200 if (align_words >= GP_ARG_NUM_REG)
12201 return NULL_RTX;
12203 n_units = rs6000_arg_size (mode, type);
12205 /* Optimize the simple case where the arg fits in one gpr, except in
12206 the case of BLKmode due to assign_parms assuming that registers are
12207 BITS_PER_WORD wide. */
12208 if (n_units == 0
12209 || (n_units == 1 && mode != BLKmode))
12210 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12212 k = 0;
12213 if (align_words + n_units > GP_ARG_NUM_REG)
12214 /* Not all of the arg fits in gprs. Say that it goes in memory too,
12215 using a magic NULL_RTX component.
12216 This is not strictly correct. Only some of the arg belongs in
12217 memory, not all of it. However, the normal scheme using
12218 function_arg_partial_nregs can result in unusual subregs, e.g.
12219 (subreg:SI (reg:DF) 4), which are not handled well. The code to
12220 store the whole arg to memory is often more efficient than code
12221 to store pieces, and we know that space is available in the right
12222 place for the whole arg. */
12223 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12225 i = 0;
12228 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
12229 rtx off = GEN_INT (i++ * 4);
12230 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12232 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
12234 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
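/* Illustrative shape of the result (assuming GP_ARG_MIN_REG is r3): a
   DFmode arg with align_words == 7 has n_units == 2 but only r10
   available, so we build roughly
   (parallel:DF [(expr_list (nil) (const_int 0))
   (expr_list (reg:SI 10) (const_int 0))])
   where the leading (nil) is the magic NULL_RTX element saying part of
   the arg also lives in memory. */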
12237 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
12238 but must also be copied into the parameter save area starting at
12239 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
12240 to the GPRs and/or memory. Return the number of elements used. */
12242 static int
12243 rs6000_psave_function_arg (machine_mode mode, const_tree type,
12244 int align_words, rtx *rvec)
12246 int k = 0;
12248 if (align_words < GP_ARG_NUM_REG)
12250 int n_words = rs6000_arg_size (mode, type);
12252 if (align_words + n_words > GP_ARG_NUM_REG
12253 || mode == BLKmode
12254 || (TARGET_32BIT && TARGET_POWERPC64))
12256 /* If this is partially on the stack, then we only
12257 include the portion actually in registers here. */
12258 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12259 int i = 0;
12261 if (align_words + n_words > GP_ARG_NUM_REG)
12263 /* Not all of the arg fits in gprs. Say that it goes in memory
12264 too, using a magic NULL_RTX component. Also see comment in
12265 rs6000_mixed_function_arg for why the normal
12266 function_arg_partial_nregs scheme doesn't work in this case. */
12267 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12272 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12273 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12274 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12276 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12278 else
12280 /* The whole arg fits in gprs. */
12281 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12282 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12285 else
12287 /* It's entirely in memory. */
12288 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12291 return k;
12294 /* RVEC is a vector of K components of an argument of mode MODE.
12295 Construct the final function_arg return value from it. */
12297 static rtx
12298 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12300 gcc_assert (k >= 1);
12302 /* Avoid returning a PARALLEL in the trivial cases. */
12303 if (k == 1)
12305 if (XEXP (rvec[0], 0) == NULL_RTX)
12306 return NULL_RTX;
12308 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12309 return XEXP (rvec[0], 0);
12312 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12315 /* Determine where to put an argument to a function.
12316 Value is zero to push the argument on the stack,
12317 or a hard register in which to store the argument.
12319 MODE is the argument's machine mode.
12320 TYPE is the data type of the argument (as a tree).
12321 This is null for libcalls where that information may
12322 not be available.
12323 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12324 the preceding args and about the function being called. It is
12325 not modified in this routine.
12326 NAMED is nonzero if this argument is a named parameter
12327 (otherwise it is an extra parameter matching an ellipsis).
12329 On RS/6000 the first eight words of non-FP are normally in registers
12330 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12331 Under V.4, the first 8 FP args are in registers.
12333 If this is floating-point and no prototype is specified, we use
12334 both an FP and integer register (or possibly FP reg and stack). Library
12335 functions (when CALL_LIBCALL is set) always have the proper types for args,
12336 so we can pass the FP value just in one register. emit_library_function
12337 doesn't support PARALLEL anyway.
12339 Note that for args passed by reference, function_arg will be called
12340 with MODE and TYPE set to that of the pointer to the arg, not the arg
12341 itself. */
12343 static rtx
12344 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12345 const_tree type, bool named)
12347 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12348 enum rs6000_abi abi = DEFAULT_ABI;
12349 machine_mode elt_mode;
12350 int n_elts;
12352 /* Return a marker to indicate whether the bit in CR1 that V.4 uses
12353 to say fp args were passed in registers needs to be set or cleared.
12354 Assume that we don't need the marker for software floating point,
12355 or compiler generated library calls. */
12356 if (mode == VOIDmode)
12358 if (abi == ABI_V4
12359 && (cum->call_cookie & CALL_LIBCALL) == 0
12360 && (cum->stdarg
12361 || (cum->nargs_prototype < 0
12362 && (cum->prototype || TARGET_NO_PROTOTYPE)))
12363 && TARGET_HARD_FLOAT)
12364 return GEN_INT (cum->call_cookie
12365 | ((cum->fregno == FP_ARG_MIN_REG)
12366 ? CALL_V4_SET_FP_ARGS
12367 : CALL_V4_CLEAR_FP_ARGS));
12369 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12372 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12374 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12376 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12377 if (rslt != NULL_RTX)
12378 return rslt;
12379 /* Else fall through to usual handling. */
12382 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12384 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12385 rtx r, off;
12386 int i, k = 0;
12388 /* Do we also need to pass this argument in the parameter save area?
12389 Library support functions for IEEE 128-bit are assumed to not need the
12390 value passed both in GPRs and in vector registers. */
12391 if (TARGET_64BIT && !cum->prototype
12392 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12394 int align_words = ROUND_UP (cum->words, 2);
12395 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12398 /* Describe where this argument goes in the vector registers. */
12399 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
12401 r = gen_rtx_REG (elt_mode, cum->vregno + i);
12402 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12403 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12406 return rs6000_finish_function_arg (mode, rvec, k);
12408 else if (TARGET_ALTIVEC_ABI
12409 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
12410 || (type && TREE_CODE (type) == VECTOR_TYPE
12411 && int_size_in_bytes (type) == 16)))
12413 if (named || abi == ABI_V4)
12414 return NULL_RTX;
12415 else
12417 /* Vector parameters to varargs functions under AIX or Darwin
12418 get passed in memory and possibly also in GPRs. */
12419 int align, align_words, n_words;
12420 machine_mode part_mode;
12422 /* Vector parameters must be 16-byte aligned. In 32-bit
12423 mode this means we need to take into account the offset
12424 to the parameter save area. In 64-bit mode, they just
12425 have to start on an even word, since the parameter save
12426 area is 16-byte aligned. */
12427 if (TARGET_32BIT)
12428 align = -(rs6000_parm_offset () + cum->words) & 3;
12429 else
12430 align = cum->words & 1;
12431 align_words = cum->words + align;
12433 /* Out of registers? Memory, then. */
12434 if (align_words >= GP_ARG_NUM_REG)
12435 return NULL_RTX;
12437 if (TARGET_32BIT && TARGET_POWERPC64)
12438 return rs6000_mixed_function_arg (mode, type, align_words);
12440 /* The vector value goes in GPRs. Only the part of the
12441 value in GPRs is reported here. */
12442 part_mode = mode;
12443 n_words = rs6000_arg_size (mode, type);
12444 if (align_words + n_words > GP_ARG_NUM_REG)
12445 /* Fortunately, there are only two possibilities: the value
12446 is either wholly in GPRs or half in GPRs and half not. */
12447 part_mode = DImode;
12449 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
12453 else if (abi == ABI_V4)
12455 if (abi_v4_pass_in_fpr (mode))
12457 /* _Decimal128 must use an even/odd register pair. This assumes
12458 that the register number is odd when fregno is odd. */
12459 if (mode == TDmode && (cum->fregno % 2) == 1)
12460 cum->fregno++;
12462 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12463 <= FP_ARG_V4_MAX_REG)
12464 return gen_rtx_REG (mode, cum->fregno);
12465 else
12466 return NULL_RTX;
12468 else
12470 int n_words = rs6000_arg_size (mode, type);
12471 int gregno = cum->sysv_gregno;
12473 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
12474 As does any other 2 word item such as complex int due to a
12475 historical mistake. */
12476 if (n_words == 2)
12477 gregno += (1 - gregno) & 1;
12479 /* Multi-reg args are not split between registers and stack. */
12480 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12481 return NULL_RTX;
12483 if (TARGET_32BIT && TARGET_POWERPC64)
12484 return rs6000_mixed_function_arg (mode, type,
12485 gregno - GP_ARG_MIN_REG);
12486 return gen_rtx_REG (mode, gregno);
12489 else
12491 int align_words = rs6000_parm_start (mode, type, cum->words);
12493 /* _Decimal128 must be passed in an even/odd float register pair.
12494 This assumes that the register number is odd when fregno is odd. */
12495 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12496 cum->fregno++;
12498 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12500 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12501 rtx r, off;
12502 int i, k = 0;
12503 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12504 int fpr_words;
12506 /* Do we also need to pass this argument in the parameter
12507 save area? */
12508 if (type && (cum->nargs_prototype <= 0
12509 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12510 && TARGET_XL_COMPAT
12511 && align_words >= GP_ARG_NUM_REG)))
12512 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12514 /* Describe where this argument goes in the fprs. */
12515 for (i = 0; i < n_elts
12516 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12518 /* Check if the argument is split over registers and memory.
12519 This can only ever happen for long double or _Decimal128;
12520 complex types are handled via split_complex_arg. */
12521 machine_mode fmode = elt_mode;
12522 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12524 gcc_assert (FLOAT128_2REG_P (fmode));
12525 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12528 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12529 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12530 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12533 /* If there were not enough FPRs to hold the argument, the rest
12534 usually goes into memory. However, if the current position
12535 is still within the register parameter area, a portion may
12536 actually have to go into GPRs.
12538 Note that it may happen that the portion of the argument
12539 passed in the first "half" of the first GPR was already
12540 passed in the last FPR as well.
12542 For unnamed arguments, we already set up GPRs to cover the
12543 whole argument in rs6000_psave_function_arg, so there is
12544 nothing further to do at this point. */
12545 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12546 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12547 && cum->nargs_prototype > 0)
12549 static bool warned;
12551 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12552 int n_words = rs6000_arg_size (mode, type);
12554 align_words += fpr_words;
12555 n_words -= fpr_words;
12559 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12560 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12561 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12563 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12565 if (!warned && warn_psabi)
12567 warned = true;
12568 inform (input_location,
12569 "the ABI of passing homogeneous float aggregates"
12570 " has changed in GCC 5");
12574 return rs6000_finish_function_arg (mode, rvec, k);
12576 else if (align_words < GP_ARG_NUM_REG)
12578 if (TARGET_32BIT && TARGET_POWERPC64)
12579 return rs6000_mixed_function_arg (mode, type, align_words);
12581 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12583 else
12584 return NULL_RTX;
12588 /* For an arg passed partly in registers and partly in memory, this is
12589 the number of bytes passed in registers. For args passed entirely in
12590 registers or entirely in memory, zero. When an arg is described by a
12591 PARALLEL, perhaps using more than one register type, this function
12592 returns the number of bytes used by the first element of the PARALLEL. */
12594 static int
12595 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12596 tree type, bool named)
12598 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12599 bool passed_in_gprs = true;
12600 int ret = 0;
12601 int align_words;
12602 machine_mode elt_mode;
12603 int n_elts;
12605 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12607 if (DEFAULT_ABI == ABI_V4)
12608 return 0;
12610 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12612 /* If we are passing this arg in the fixed parameter save area (gprs or
12613 memory) as well as VRs, we do not use the partial bytes mechanism;
12614 instead, rs6000_function_arg will return a PARALLEL including a memory
12615 element as necessary. Library support functions for IEEE 128-bit are
12616 assumed to not need the value passed both in GPRs and in vector
12617 registers. */
12618 if (TARGET_64BIT && !cum->prototype
12619 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12620 return 0;
12622 /* Otherwise, we pass in VRs only. Check for partial copies. */
12623 passed_in_gprs = false;
12624 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12625 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12628 /* In this complicated case we just disable the partial_nregs code. */
12629 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12630 return 0;
12632 align_words = rs6000_parm_start (mode, type, cum->words);
12634 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12636 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12638 /* If we are passing this arg in the fixed parameter save area
12639 (gprs or memory) as well as FPRs, we do not use the partial
12640 bytes mechanism; instead, rs6000_function_arg will return a
12641 PARALLEL including a memory element as necessary. */
12642 if (type
12643 && (cum->nargs_prototype <= 0
12644 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12645 && TARGET_XL_COMPAT
12646 && align_words >= GP_ARG_NUM_REG)))
12647 return 0;
12649 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12650 passed_in_gprs = false;
12651 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12653 /* Compute number of bytes / words passed in FPRs. If there
12654 is still space available in the register parameter area
12655 *after* that amount, a part of the argument will be passed
12656 in GPRs. In that case, the total amount passed in any
12657 registers is equal to the amount that would have been passed
12658 in GPRs if everything were passed there, so we fall back to
12659 the GPR code below to compute the appropriate value. */
12660 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12661 * MIN (8, GET_MODE_SIZE (elt_mode)));
12662 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12664 if (align_words + fpr_words < GP_ARG_NUM_REG)
12665 passed_in_gprs = true;
12666 else
12667 ret = fpr;
12671 if (passed_in_gprs
12672 && align_words < GP_ARG_NUM_REG
12673 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12674 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12676 if (ret != 0 && TARGET_DEBUG_ARG)
12677 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12679 return ret;
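/* Worked example (illustrative): a homogeneous aggregate of four
   DFmode elements arriving when only two FPRs remain gives
   fpr == 2 * 8 == 16; if the GPR parameter words are exhausted too,
   ret is those 16 bytes, otherwise passed_in_gprs is set again and the
   GPR computation above yields the total instead. */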
12682 /* A C expression that indicates when an argument must be passed by
12683 reference. If nonzero for an argument, a copy of that argument is
12684 made in memory and a pointer to the argument is passed instead of
12685 the argument itself. The pointer is passed in whatever way is
12686 appropriate for passing a pointer to that type.
12688 Under V.4, aggregates and long double are passed by reference.
12690 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12691 reference unless the AltiVec vector extension ABI is in force.
12693 As an extension to all ABIs, variable sized types are passed by
12694 reference. */
12696 static bool
12697 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12698 machine_mode mode, const_tree type,
12699 bool named ATTRIBUTE_UNUSED)
12701 if (!type)
12702 return 0;
12704 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12705 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12707 if (TARGET_DEBUG_ARG)
12708 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12709 return 1;
12712 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12714 if (TARGET_DEBUG_ARG)
12715 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12716 return 1;
12719 if (int_size_in_bytes (type) < 0)
12721 if (TARGET_DEBUG_ARG)
12722 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12723 return 1;
12726 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12727 modes only exist for GCC vector types if -maltivec. */
12728 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12730 if (TARGET_DEBUG_ARG)
12731 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12732 return 1;
12735 /* Pass synthetic vectors in memory. */
12736 if (TREE_CODE (type) == VECTOR_TYPE
12737 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12739 static bool warned_for_pass_big_vectors = false;
12740 if (TARGET_DEBUG_ARG)
12741 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12742 if (!warned_for_pass_big_vectors)
12744 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12745 "non-standard ABI extension with no compatibility "
12746 "guarantee");
12747 warned_for_pass_big_vectors = true;
12749 return 1;
12752 return 0;
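/* Illustration (hypothetical user code): under the V.4 ABI the call

     struct pair { int a, b; };
     void f (struct pair p);
     f (x);

   takes the AGGREGATE_TYPE_P branch above: x is copied to memory and f
   receives a pointer to the copy.  A variable sized type is handled the
   same way under every ABI.  */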
12755 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12756 already processed. Return true if the parameter must be passed
12757 (fully or partially) on the stack. */
12759 static bool
12760 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12762 machine_mode mode;
12763 int unsignedp;
12764 rtx entry_parm;
12766 /* Catch errors. */
12767 if (type == NULL || type == error_mark_node)
12768 return true;
12770 /* Handle types with no storage requirement. */
12771 if (TYPE_MODE (type) == VOIDmode)
12772 return false;
12774 /* Handle complex types. */
12775 if (TREE_CODE (type) == COMPLEX_TYPE)
12776 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12777 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
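/* The two identical calls above are intentional: each call also advances
   ARGS_SO_FAR, so the real and the imaginary part are both accounted
   for.  */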
12779 /* Handle transparent aggregates. */
12780 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12781 && TYPE_TRANSPARENT_AGGR (type))
12782 type = TREE_TYPE (first_field (type));
12784 /* See if this arg was passed by invisible reference. */
12785 if (pass_by_reference (get_cumulative_args (args_so_far),
12786 TYPE_MODE (type), type, true))
12787 type = build_pointer_type (type);
12789 /* Find mode as it is passed by the ABI. */
12790 unsignedp = TYPE_UNSIGNED (type);
12791 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12793 /* If we must pass in stack, we need a stack. */
12794 if (rs6000_must_pass_in_stack (mode, type))
12795 return true;
12797 /* If there is no incoming register, we need a stack. */
12798 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12799 if (entry_parm == NULL)
12800 return true;
12802 /* Likewise if we need to pass both in registers and on the stack. */
12803 if (GET_CODE (entry_parm) == PARALLEL
12804 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12805 return true;
12807 /* Also true if we're partially in registers and partially not. */
12808 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12809 return true;
12811 /* Update info on where next arg arrives in registers. */
12812 rs6000_function_arg_advance (args_so_far, mode, type, true);
12813 return false;
12816 /* Return true if FUN has no prototype, has a variable argument
12817 list, or passes any parameter in memory. */
12819 static bool
12820 rs6000_function_parms_need_stack (tree fun, bool incoming)
12822 tree fntype, result;
12823 CUMULATIVE_ARGS args_so_far_v;
12824 cumulative_args_t args_so_far;
12826 if (!fun)
12827 /* Must be a libcall, all of which only use reg parms. */
12828 return false;
12830 fntype = fun;
12831 if (!TYPE_P (fun))
12832 fntype = TREE_TYPE (fun);
12834 /* Varargs functions need the parameter save area. */
12835 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12836 return true;
12838 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12839 args_so_far = pack_cumulative_args (&args_so_far_v);
12841 /* When incoming, we will have been passed the function decl.
12842 It is necessary to use the decl to handle K&R style functions,
12843 where TYPE_ARG_TYPES may not be available. */
12844 if (incoming)
12846 gcc_assert (DECL_P (fun));
12847 result = DECL_RESULT (fun);
12849 else
12850 result = TREE_TYPE (fntype);
12852 if (result && aggregate_value_p (result, fntype))
12854 if (!TYPE_P (result))
12855 result = TREE_TYPE (result);
12856 result = build_pointer_type (result);
12857 rs6000_parm_needs_stack (args_so_far, result);
12860 if (incoming)
12862 tree parm;
12864 for (parm = DECL_ARGUMENTS (fun);
12865 parm && parm != void_list_node;
12866 parm = TREE_CHAIN (parm))
12867 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12868 return true;
12870 else
12872 function_args_iterator args_iter;
12873 tree arg_type;
12875 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12876 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12877 return true;
12880 return false;
12883 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12884 usually a constant depending on the ABI. However, in the ELFv2 ABI
12885 the register parameter area is optional when calling a function that
12886 has a prototype in scope, has no variable argument list, and passes
12887 all parameters in registers. */
12889 static int
12890 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12892 int reg_parm_stack_space;
12894 switch (DEFAULT_ABI)
12896 default:
12897 reg_parm_stack_space = 0;
12898 break;
12900 case ABI_AIX:
12901 case ABI_DARWIN:
12902 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12903 break;
12905 case ABI_ELFv2:
12906 /* ??? Recomputing this every time is a bit expensive. Is there
12907 a place to cache this information? */
12908 if (rs6000_function_parms_need_stack (fun, incoming))
12909 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12910 else
12911 reg_parm_stack_space = 0;
12912 break;
12915 return reg_parm_stack_space;
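/* Illustration (hypothetical declarations): under ELFv2 on a 64-bit target

     int f (int, int);            prototyped, everything in GPRs
     int g (const char *, ...);   varargs

   rs6000_reg_parm_stack_space returns 0 for f, so the caller may omit the
   parameter save area, but returns 64 for g, whose prologue must be able
   to dump r3..r10 into it.  */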
12918 static void
12919 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12921 int i;
12922 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12924 if (nregs == 0)
12925 return;
12927 for (i = 0; i < nregs; i++)
12929 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12930 if (reload_completed)
12932 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12933 tem = NULL_RTX;
12934 else
12935 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12936 i * GET_MODE_SIZE (reg_mode));
12938 else
12939 tem = replace_equiv_address (tem, XEXP (tem, 0));
12941 gcc_assert (tem);
12943 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12947 /* Perform any actions needed for a function that is receiving a
12948 variable number of arguments.
12950 CUM is as above.
12952 MODE and TYPE are the mode and type of the current parameter.
12954 PRETEND_SIZE is a variable that should be set to the amount of stack
12955 that must be pushed by the prolog to pretend that our caller pushed
12956 it.
12958 Normally, this macro will push all remaining incoming registers on the
12959 stack and set PRETEND_SIZE to the length of the registers pushed. */
12961 static void
12962 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12963 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12964 int no_rtl)
12966 CUMULATIVE_ARGS next_cum;
12967 int reg_size = TARGET_32BIT ? 4 : 8;
12968 rtx save_area = NULL_RTX, mem;
12969 int first_reg_offset;
12970 alias_set_type set;
12972 /* Skip the last named argument. */
12973 next_cum = *get_cumulative_args (cum);
12974 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12976 if (DEFAULT_ABI == ABI_V4)
12978 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12980 if (! no_rtl)
12982 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12983 HOST_WIDE_INT offset = 0;
12985 /* Try to optimize the size of the varargs save area.
12986 The ABI requires that ap.reg_save_area is doubleword
12987 aligned, but we don't need to allocate space for all
12988 the bytes, only for those to which we will actually save
12989 anything. */
12990 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12991 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12992 if (TARGET_HARD_FLOAT
12993 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12994 && cfun->va_list_fpr_size)
12996 if (gpr_reg_num)
12997 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12998 * UNITS_PER_FP_WORD;
12999 if (cfun->va_list_fpr_size
13000 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13001 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
13002 else
13003 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13004 * UNITS_PER_FP_WORD;
13006 if (gpr_reg_num)
13008 offset = -((first_reg_offset * reg_size) & ~7);
13009 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
13011 gpr_reg_num = cfun->va_list_gpr_size;
13012 if (reg_size == 4 && (first_reg_offset & 1))
13013 gpr_reg_num++;
13015 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
13017 else if (fpr_size)
13018 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
13019 * UNITS_PER_FP_WORD
13020 - (int) (GP_ARG_NUM_REG * reg_size);
13022 if (gpr_size + fpr_size)
13024 rtx reg_save_area
13025 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
13026 gcc_assert (GET_CODE (reg_save_area) == MEM);
13027 reg_save_area = XEXP (reg_save_area, 0);
13028 if (GET_CODE (reg_save_area) == PLUS)
13030 gcc_assert (XEXP (reg_save_area, 0)
13031 == virtual_stack_vars_rtx);
13032 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
13033 offset += INTVAL (XEXP (reg_save_area, 1));
13035 else
13036 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
13039 cfun->machine->varargs_save_offset = offset;
13040 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
13043 else
13045 first_reg_offset = next_cum.words;
13046 save_area = crtl->args.internal_arg_pointer;
13048 if (targetm.calls.must_pass_in_stack (mode, type))
13049 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
13052 set = get_varargs_alias_set ();
13053 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
13054 && cfun->va_list_gpr_size)
13056 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
13058 if (va_list_gpr_counter_field)
13059 /* V4 va_list_gpr_size counts number of registers needed. */
13060 n_gpr = cfun->va_list_gpr_size;
13061 else
13062 /* char * va_list instead counts number of bytes needed. */
13063 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
13065 if (nregs > n_gpr)
13066 nregs = n_gpr;
13068 mem = gen_rtx_MEM (BLKmode,
13069 plus_constant (Pmode, save_area,
13070 first_reg_offset * reg_size));
13071 MEM_NOTRAP_P (mem) = 1;
13072 set_mem_alias_set (mem, set);
13073 set_mem_align (mem, BITS_PER_WORD);
13075 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
13076 nregs);
13079 /* Save FP registers if needed. */
13080 if (DEFAULT_ABI == ABI_V4
13081 && TARGET_HARD_FLOAT
13082 && ! no_rtl
13083 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13084 && cfun->va_list_fpr_size)
13086 int fregno = next_cum.fregno, nregs;
13087 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
13088 rtx lab = gen_label_rtx ();
13089 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
13090 * UNITS_PER_FP_WORD);
13092 emit_jump_insn
13093 (gen_rtx_SET (pc_rtx,
13094 gen_rtx_IF_THEN_ELSE (VOIDmode,
13095 gen_rtx_NE (VOIDmode, cr1,
13096 const0_rtx),
13097 gen_rtx_LABEL_REF (VOIDmode, lab),
13098 pc_rtx)));
13100 for (nregs = 0;
13101 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
13102 fregno++, off += UNITS_PER_FP_WORD, nregs++)
13104 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13105 ? DFmode : SFmode,
13106 plus_constant (Pmode, save_area, off));
13107 MEM_NOTRAP_P (mem) = 1;
13108 set_mem_alias_set (mem, set);
13109 set_mem_align (mem, GET_MODE_ALIGNMENT (
13110 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13111 ? DFmode : SFmode));
13112 emit_move_insn (mem, gen_rtx_REG (
13113 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13114 ? DFmode : SFmode, fregno));
13117 emit_label (lab);
13121 /* Create the va_list data type. */
13123 static tree
13124 rs6000_build_builtin_va_list (void)
13126 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
13128 /* For AIX, prefer 'char *' because that's what the system
13129 header files like. */
13130 if (DEFAULT_ABI != ABI_V4)
13131 return build_pointer_type (char_type_node);
13133 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
13134 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
13135 get_identifier ("__va_list_tag"), record);
13137 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
13138 unsigned_char_type_node);
13139 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
13140 unsigned_char_type_node);
13141 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
13142 every user file. */
13143 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13144 get_identifier ("reserved"), short_unsigned_type_node);
13145 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13146 get_identifier ("overflow_arg_area"),
13147 ptr_type_node);
13148 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13149 get_identifier ("reg_save_area"),
13150 ptr_type_node);
13152 va_list_gpr_counter_field = f_gpr;
13153 va_list_fpr_counter_field = f_fpr;
13155 DECL_FIELD_CONTEXT (f_gpr) = record;
13156 DECL_FIELD_CONTEXT (f_fpr) = record;
13157 DECL_FIELD_CONTEXT (f_res) = record;
13158 DECL_FIELD_CONTEXT (f_ovf) = record;
13159 DECL_FIELD_CONTEXT (f_sav) = record;
13161 TYPE_STUB_DECL (record) = type_decl;
13162 TYPE_NAME (record) = type_decl;
13163 TYPE_FIELDS (record) = f_gpr;
13164 DECL_CHAIN (f_gpr) = f_fpr;
13165 DECL_CHAIN (f_fpr) = f_res;
13166 DECL_CHAIN (f_res) = f_ovf;
13167 DECL_CHAIN (f_ovf) = f_sav;
13169 layout_type (record);
13171 /* The correct type is an array type of one element. */
13172 return build_array_type (record, build_index_type (size_zero_node));
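/* For reference, the record built above is the user-visible V.4 va_list;
   a field-for-field sketch in user syntax:

     typedef struct __va_list_tag {
       unsigned char gpr;           next GPR to fetch, counting from r3
       unsigned char fpr;           next FPR to fetch, counting from f1
       unsigned short reserved;     padding, named to placate -Wpadded
       void *overflow_arg_area;     arguments that spilled to the stack
       void *reg_save_area;         where the prologue saved r3..r10/f1..f8
     } va_list[1];  */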
13175 /* Implement va_start. */
13177 static void
13178 rs6000_va_start (tree valist, rtx nextarg)
13180 HOST_WIDE_INT words, n_gpr, n_fpr;
13181 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13182 tree gpr, fpr, ovf, sav, t;
13184 /* Only SVR4 needs something special. */
13185 if (DEFAULT_ABI != ABI_V4)
13187 std_expand_builtin_va_start (valist, nextarg);
13188 return;
13191 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13192 f_fpr = DECL_CHAIN (f_gpr);
13193 f_res = DECL_CHAIN (f_fpr);
13194 f_ovf = DECL_CHAIN (f_res);
13195 f_sav = DECL_CHAIN (f_ovf);
13197 valist = build_simple_mem_ref (valist);
13198 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13199 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13200 f_fpr, NULL_TREE);
13201 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13202 f_ovf, NULL_TREE);
13203 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13204 f_sav, NULL_TREE);
13206 /* Count number of gp and fp argument registers used. */
13207 words = crtl->args.info.words;
13208 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
13209 GP_ARG_NUM_REG);
13210 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
13211 FP_ARG_NUM_REG);
13213 if (TARGET_DEBUG_ARG)
13214 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
13215 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
13216 words, n_gpr, n_fpr);
13218 if (cfun->va_list_gpr_size)
13220 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13221 build_int_cst (NULL_TREE, n_gpr));
13222 TREE_SIDE_EFFECTS (t) = 1;
13223 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13226 if (cfun->va_list_fpr_size)
13228 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13229 build_int_cst (NULL_TREE, n_fpr));
13230 TREE_SIDE_EFFECTS (t) = 1;
13231 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13233 #ifdef HAVE_AS_GNU_ATTRIBUTE
13234 if (call_ABI_of_interest (cfun->decl))
13235 rs6000_passes_float = true;
13236 #endif
13239 /* Find the overflow area. */
13240 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13241 if (words != 0)
13242 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13243 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13244 TREE_SIDE_EFFECTS (t) = 1;
13245 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13247 /* If there were no va_arg invocations, don't set up the register
13248 save area. */
13249 if (!cfun->va_list_gpr_size
13250 && !cfun->va_list_fpr_size
13251 && n_gpr < GP_ARG_NUM_REG
13252 && n_fpr < FP_ARG_V4_MAX_REG)
13253 return;
13255 /* Find the register save area. */
13256 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13257 if (cfun->machine->varargs_save_offset)
13258 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13259 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13260 TREE_SIDE_EFFECTS (t) = 1;
13261 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13264 /* Implement va_arg. */
13266 static tree
13267 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13268 gimple_seq *post_p)
13270 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13271 tree gpr, fpr, ovf, sav, reg, t, u;
13272 int size, rsize, n_reg, sav_ofs, sav_scale;
13273 tree lab_false, lab_over, addr;
13274 int align;
13275 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13276 int regalign = 0;
13277 gimple *stmt;
13279 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13281 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13282 return build_va_arg_indirect_ref (t);
13285 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13286 earlier version of gcc, with the property that it always applied alignment
13287 adjustments to the va-args (even for zero-sized types). The cheapest way
13288 to deal with this is to replicate the effect of the part of
13289 std_gimplify_va_arg_expr that carries out the align adjust, for the case
13290 of relevance.
13291 We don't need to check for pass-by-reference because of the test above.
13292 We can return a simplified answer, since we know there's no offset to add. */
13294 if (((TARGET_MACHO
13295 && rs6000_darwin64_abi)
13296 || DEFAULT_ABI == ABI_ELFv2
13297 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13298 && integer_zerop (TYPE_SIZE (type)))
13300 unsigned HOST_WIDE_INT align, boundary;
13301 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13302 align = PARM_BOUNDARY / BITS_PER_UNIT;
13303 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13304 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13305 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13306 boundary /= BITS_PER_UNIT;
13307 if (boundary > align)
13309 tree t;
13310 /* This updates arg ptr by the amount that would be necessary
13311 to align the zero-sized (but not zero-alignment) item. */
13312 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13313 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13314 gimplify_and_add (t, pre_p);
13316 t = fold_convert (sizetype, valist_tmp);
13317 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13318 fold_convert (TREE_TYPE (valist),
13319 fold_build2 (BIT_AND_EXPR, sizetype, t,
13320 size_int (-boundary))));
13321 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13322 gimplify_and_add (t, pre_p);
13324 /* Since it is zero-sized there's no increment for the item itself. */
13325 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13326 return build_va_arg_indirect_ref (valist_tmp);
13329 if (DEFAULT_ABI != ABI_V4)
13331 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13333 tree elem_type = TREE_TYPE (type);
13334 machine_mode elem_mode = TYPE_MODE (elem_type);
13335 int elem_size = GET_MODE_SIZE (elem_mode);
13337 if (elem_size < UNITS_PER_WORD)
13339 tree real_part, imag_part;
13340 gimple_seq post = NULL;
13342 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13343 &post);
13344 /* Copy the value into a temporary, lest the formal temporary
13345 be reused out from under us. */
13346 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13347 gimple_seq_add_seq (pre_p, post);
13349 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13350 post_p);
13352 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13356 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
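/* Illustration: on a 64-bit AIX-style target, va_arg (ap, _Complex float)
   is handled by the branch above as two 4-byte element fetches glued back
   together with COMPLEX_EXPR.  Everything from here on implements the V.4
   scheme using the __va_list_tag record built earlier.  */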
13359 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13360 f_fpr = DECL_CHAIN (f_gpr);
13361 f_res = DECL_CHAIN (f_fpr);
13362 f_ovf = DECL_CHAIN (f_res);
13363 f_sav = DECL_CHAIN (f_ovf);
13365 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13366 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13367 f_fpr, NULL_TREE);
13368 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13369 f_ovf, NULL_TREE);
13370 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13371 f_sav, NULL_TREE);
13373 size = int_size_in_bytes (type);
13374 rsize = (size + 3) / 4;
13375 int pad = 4 * rsize - size;
13376 align = 1;
13378 machine_mode mode = TYPE_MODE (type);
13379 if (abi_v4_pass_in_fpr (mode))
13381 /* FP args go in FP registers, if present. */
13382 reg = fpr;
13383 n_reg = (size + 7) / 8;
13384 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
13385 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
13386 if (mode != SFmode && mode != SDmode)
13387 align = 8;
13389 else
13391 /* Otherwise into GP registers. */
13392 reg = gpr;
13393 n_reg = rsize;
13394 sav_ofs = 0;
13395 sav_scale = 4;
13396 if (n_reg == 2)
13397 align = 8;
13400 /* Pull the value out of the saved registers.... */
13402 lab_over = NULL;
13403 addr = create_tmp_var (ptr_type_node, "addr");
13405 /* AltiVec vectors never go in registers when -mabi=altivec. */
13406 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13407 align = 16;
13408 else
13410 lab_false = create_artificial_label (input_location);
13411 lab_over = create_artificial_label (input_location);
13413 /* Long long is aligned in the registers, as are any other 2-GPR
13414 items such as complex int, due to a historical mistake. */
13415 u = reg;
13416 if (n_reg == 2 && reg == gpr)
13418 regalign = 1;
13419 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13420 build_int_cst (TREE_TYPE (reg), n_reg - 1));
13421 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
13422 unshare_expr (reg), u);
13424 /* _Decimal128 is passed in even/odd fpr pairs; the stored
13425 reg number is 0 for f1, so we want to make it odd. */
13426 else if (reg == fpr && mode == TDmode)
13428 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13429 build_int_cst (TREE_TYPE (reg), 1));
13430 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
13433 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
13434 t = build2 (GE_EXPR, boolean_type_node, u, t);
13435 u = build1 (GOTO_EXPR, void_type_node, lab_false);
13436 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13437 gimplify_and_add (t, pre_p);
13439 t = sav;
13440 if (sav_ofs)
13441 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13443 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13444 build_int_cst (TREE_TYPE (reg), n_reg));
13445 u = fold_convert (sizetype, u);
13446 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13447 t = fold_build_pointer_plus (t, u);
13449 /* _Decimal32 varargs are located in the second word of the 64-bit
13450 FP register for 32-bit binaries. */
13451 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
13452 t = fold_build_pointer_plus_hwi (t, size);
13454 /* Args are passed right-aligned. */
13455 if (BYTES_BIG_ENDIAN)
13456 t = fold_build_pointer_plus_hwi (t, pad);
13458 gimplify_assign (addr, t, pre_p);
13460 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13462 stmt = gimple_build_label (lab_false);
13463 gimple_seq_add_stmt (pre_p, stmt);
13465 if ((n_reg == 2 && !regalign) || n_reg > 2)
13467 /* Ensure that we don't find any more args in regs.
13468 Alignment has been taken care of for the special cases. */
13469 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13473 /* ... otherwise out of the overflow area. */
13475 /* Care for on-stack alignment if needed. */
13476 t = ovf;
13477 if (align != 1)
13479 t = fold_build_pointer_plus_hwi (t, align - 1);
13480 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13481 build_int_cst (TREE_TYPE (t), -align));
13484 /* Args are passed right-aligned. */
13485 if (BYTES_BIG_ENDIAN)
13486 t = fold_build_pointer_plus_hwi (t, pad);
13488 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13490 gimplify_assign (unshare_expr (addr), t, pre_p);
13492 t = fold_build_pointer_plus_hwi (t, size);
13493 gimplify_assign (unshare_expr (ovf), t, pre_p);
13495 if (lab_over)
13497 stmt = gimple_build_label (lab_over);
13498 gimple_seq_add_stmt (pre_p, stmt);
13501 if (STRICT_ALIGNMENT
13502 && (TYPE_ALIGN (type)
13503 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13505 /* The value (of type complex double, for example) may not be
13506 aligned in memory in the saved registers, so copy via a
13507 temporary. (This is the same code as used for SPARC.) */
13508 tree tmp = create_tmp_var (type, "va_arg_tmp");
13509 tree dest_addr = build_fold_addr_expr (tmp);
13511 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13512 3, dest_addr, addr, size_int (rsize * 4));
13514 gimplify_and_add (copy, pre_p);
13515 addr = dest_addr;
13518 addr = fold_convert (ptrtype, addr);
13519 return build_va_arg_indirect_ref (addr);
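/* Worked example (V.4, hard float): for va_arg (ap, double) the code above
   computes reg = fpr, n_reg = 1, sav_ofs = 32 and sav_scale = 8.  While
   fewer than eight FPRs have been consumed, the value is loaded from

     reg_save_area + 32 + fpr * 8

   that is, just past the 8 * 4 = 32 bytes of saved GPRs; afterwards it
   comes from overflow_arg_area, 8-byte aligned.  */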
13522 /* Builtins. */
13524 static void
13525 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13527 tree t;
13528 unsigned classify = rs6000_builtin_info[(int)code].attr;
13529 const char *attr_string = "";
13531 gcc_assert (name != NULL);
13532 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13534 if (rs6000_builtin_decls[(int)code])
13535 fatal_error (input_location,
13536 "internal error: builtin function %qs already processed",
13537 name);
13539 rs6000_builtin_decls[(int)code] = t =
13540 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13542 /* Set any special attributes. */
13543 if ((classify & RS6000_BTC_CONST) != 0)
13545 /* const function, function only depends on the inputs. */
13546 TREE_READONLY (t) = 1;
13547 TREE_NOTHROW (t) = 1;
13548 attr_string = ", const";
13550 else if ((classify & RS6000_BTC_PURE) != 0)
13552 /* pure function, function can read global memory, but does not set any
13553 external state. */
13554 DECL_PURE_P (t) = 1;
13555 TREE_NOTHROW (t) = 1;
13556 attr_string = ", pure";
13558 else if ((classify & RS6000_BTC_FP) != 0)
13560 /* Function is a math function. If rounding mode is on, then treat the
13561 function as not reading global memory, but it can have arbitrary side
13562 effects. If it is off, then assume the function is a const function.
13563 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13564 builtin-attrs.def that is used for the math functions. */
13565 TREE_NOTHROW (t) = 1;
13566 if (flag_rounding_math)
13568 DECL_PURE_P (t) = 1;
13569 DECL_IS_NOVOPS (t) = 1;
13570 attr_string = ", fp, pure";
13572 else
13574 TREE_READONLY (t) = 1;
13575 attr_string = ", fp, const";
13578 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13579 gcc_unreachable ();
13581 if (TARGET_DEBUG_BUILTIN)
13582 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13583 (int)code, name, attr_string);
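/* Illustration: a builtin registered with RS6000_BTC_CONST gets
   TREE_READONLY above, so the middle end may CSE repeated calls with the
   same operands, exactly as for a user function declared with
   __attribute__ ((const)).  */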
13586 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13588 #undef RS6000_BUILTIN_0
13589 #undef RS6000_BUILTIN_1
13590 #undef RS6000_BUILTIN_2
13591 #undef RS6000_BUILTIN_3
13592 #undef RS6000_BUILTIN_A
13593 #undef RS6000_BUILTIN_D
13594 #undef RS6000_BUILTIN_H
13595 #undef RS6000_BUILTIN_P
13596 #undef RS6000_BUILTIN_Q
13597 #undef RS6000_BUILTIN_X
13599 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13600 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13601 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13602 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13603 { MASK, ICODE, NAME, ENUM },
13605 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13606 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13607 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13608 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13609 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13610 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13612 static const struct builtin_description bdesc_3arg[] =
13614 #include "rs6000-builtin.def"
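/* The table above is populated by the classic X-macro trick.  With the
   definitions currently in force, a hypothetical line in
   rs6000-builtin.def such as

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_FOO, "__builtin_altivec_foo",
                       RS6000_BTM_ALTIVEC, RS6000_BTC_CONST, CODE_FOR_foo)

   expands to the initializer

     { RS6000_BTM_ALTIVEC, CODE_FOR_foo, "__builtin_altivec_foo",
       ALTIVEC_BUILTIN_FOO },

   while every other RS6000_BUILTIN_* line expands to nothing, so only the
   ternary builtins land in bdesc_3arg.  The same .def file is re-included
   below with different definitions for each table.  */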
13617 /* DST operations: void foo (void *, const int, const char). */
13619 #undef RS6000_BUILTIN_0
13620 #undef RS6000_BUILTIN_1
13621 #undef RS6000_BUILTIN_2
13622 #undef RS6000_BUILTIN_3
13623 #undef RS6000_BUILTIN_A
13624 #undef RS6000_BUILTIN_D
13625 #undef RS6000_BUILTIN_H
13626 #undef RS6000_BUILTIN_P
13627 #undef RS6000_BUILTIN_Q
13628 #undef RS6000_BUILTIN_X
13630 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13631 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13632 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13633 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13634 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13635 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13636 { MASK, ICODE, NAME, ENUM },
13638 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13639 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13640 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13641 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13643 static const struct builtin_description bdesc_dst[] =
13645 #include "rs6000-builtin.def"
13648 /* Simple binary operations: VECc = foo (VECa, VECb). */
13650 #undef RS6000_BUILTIN_0
13651 #undef RS6000_BUILTIN_1
13652 #undef RS6000_BUILTIN_2
13653 #undef RS6000_BUILTIN_3
13654 #undef RS6000_BUILTIN_A
13655 #undef RS6000_BUILTIN_D
13656 #undef RS6000_BUILTIN_H
13657 #undef RS6000_BUILTIN_P
13658 #undef RS6000_BUILTIN_Q
13659 #undef RS6000_BUILTIN_X
13661 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13662 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13663 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13664 { MASK, ICODE, NAME, ENUM },
13666 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13667 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13668 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13669 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13670 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13671 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13672 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13674 static const struct builtin_description bdesc_2arg[] =
13676 #include "rs6000-builtin.def"
13679 #undef RS6000_BUILTIN_0
13680 #undef RS6000_BUILTIN_1
13681 #undef RS6000_BUILTIN_2
13682 #undef RS6000_BUILTIN_3
13683 #undef RS6000_BUILTIN_A
13684 #undef RS6000_BUILTIN_D
13685 #undef RS6000_BUILTIN_H
13686 #undef RS6000_BUILTIN_P
13687 #undef RS6000_BUILTIN_Q
13688 #undef RS6000_BUILTIN_X
13690 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13691 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13692 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13693 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13694 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13695 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13696 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13697 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13698 { MASK, ICODE, NAME, ENUM },
13700 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13701 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13703 /* AltiVec predicates. */
13705 static const struct builtin_description bdesc_altivec_preds[] =
13707 #include "rs6000-builtin.def"
13710 /* PAIRED predicates. */
13711 #undef RS6000_BUILTIN_0
13712 #undef RS6000_BUILTIN_1
13713 #undef RS6000_BUILTIN_2
13714 #undef RS6000_BUILTIN_3
13715 #undef RS6000_BUILTIN_A
13716 #undef RS6000_BUILTIN_D
13717 #undef RS6000_BUILTIN_H
13718 #undef RS6000_BUILTIN_P
13719 #undef RS6000_BUILTIN_Q
13720 #undef RS6000_BUILTIN_X
13722 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13723 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13724 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13725 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13726 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13727 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13728 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13729 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13730 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13731 { MASK, ICODE, NAME, ENUM },
13733 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13735 static const struct builtin_description bdesc_paired_preds[] =
13737 #include "rs6000-builtin.def"
13740 /* ABS* operations. */
13742 #undef RS6000_BUILTIN_0
13743 #undef RS6000_BUILTIN_1
13744 #undef RS6000_BUILTIN_2
13745 #undef RS6000_BUILTIN_3
13746 #undef RS6000_BUILTIN_A
13747 #undef RS6000_BUILTIN_D
13748 #undef RS6000_BUILTIN_H
13749 #undef RS6000_BUILTIN_P
13750 #undef RS6000_BUILTIN_Q
13751 #undef RS6000_BUILTIN_X
13753 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13754 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13755 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13756 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13757 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13758 { MASK, ICODE, NAME, ENUM },
13760 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13761 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13762 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13763 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13764 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13766 static const struct builtin_description bdesc_abs[] =
13768 #include "rs6000-builtin.def"
13771 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13772 foo (VECa). */
13774 #undef RS6000_BUILTIN_0
13775 #undef RS6000_BUILTIN_1
13776 #undef RS6000_BUILTIN_2
13777 #undef RS6000_BUILTIN_3
13778 #undef RS6000_BUILTIN_A
13779 #undef RS6000_BUILTIN_D
13780 #undef RS6000_BUILTIN_H
13781 #undef RS6000_BUILTIN_P
13782 #undef RS6000_BUILTIN_Q
13783 #undef RS6000_BUILTIN_X
13785 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13786 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13787 { MASK, ICODE, NAME, ENUM },
13789 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13790 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13791 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13792 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13793 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13794 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13795 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13796 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13798 static const struct builtin_description bdesc_1arg[] =
13800 #include "rs6000-builtin.def"
13803 /* Simple no-argument operations: result = __builtin_darn_32 () */
13805 #undef RS6000_BUILTIN_0
13806 #undef RS6000_BUILTIN_1
13807 #undef RS6000_BUILTIN_2
13808 #undef RS6000_BUILTIN_3
13809 #undef RS6000_BUILTIN_A
13810 #undef RS6000_BUILTIN_D
13811 #undef RS6000_BUILTIN_H
13812 #undef RS6000_BUILTIN_P
13813 #undef RS6000_BUILTIN_Q
13814 #undef RS6000_BUILTIN_X
13816 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13817 { MASK, ICODE, NAME, ENUM },
13819 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13820 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13821 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13822 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13823 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13824 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13825 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13826 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13827 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13829 static const struct builtin_description bdesc_0arg[] =
13831 #include "rs6000-builtin.def"
13834 /* HTM builtins. */
13835 #undef RS6000_BUILTIN_0
13836 #undef RS6000_BUILTIN_1
13837 #undef RS6000_BUILTIN_2
13838 #undef RS6000_BUILTIN_3
13839 #undef RS6000_BUILTIN_A
13840 #undef RS6000_BUILTIN_D
13841 #undef RS6000_BUILTIN_H
13842 #undef RS6000_BUILTIN_P
13843 #undef RS6000_BUILTIN_Q
13844 #undef RS6000_BUILTIN_X
13846 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13847 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13848 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13849 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13850 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13851 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13852 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13853 { MASK, ICODE, NAME, ENUM },
13855 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13856 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13857 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13859 static const struct builtin_description bdesc_htm[] =
13861 #include "rs6000-builtin.def"
13864 #undef RS6000_BUILTIN_0
13865 #undef RS6000_BUILTIN_1
13866 #undef RS6000_BUILTIN_2
13867 #undef RS6000_BUILTIN_3
13868 #undef RS6000_BUILTIN_A
13869 #undef RS6000_BUILTIN_D
13870 #undef RS6000_BUILTIN_H
13871 #undef RS6000_BUILTIN_P
13872 #undef RS6000_BUILTIN_Q
13874 /* Return true if a builtin function is overloaded. */
13875 bool
13876 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13878 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13881 const char *
13882 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13884 return rs6000_builtin_info[(int)fncode].name;
13887 /* Expand an expression EXP that calls a builtin without arguments. */
13888 static rtx
13889 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13891 rtx pat;
13892 machine_mode tmode = insn_data[icode].operand[0].mode;
13894 if (icode == CODE_FOR_nothing)
13895 /* Builtin not supported on this processor. */
13896 return 0;
13898 if (target == 0
13899 || GET_MODE (target) != tmode
13900 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13901 target = gen_reg_rtx (tmode);
13903 pat = GEN_FCN (icode) (target);
13904 if (! pat)
13905 return 0;
13906 emit_insn (pat);
13908 return target;
13912 static rtx
13913 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13915 rtx pat;
13916 tree arg0 = CALL_EXPR_ARG (exp, 0);
13917 tree arg1 = CALL_EXPR_ARG (exp, 1);
13918 rtx op0 = expand_normal (arg0);
13919 rtx op1 = expand_normal (arg1);
13920 machine_mode mode0 = insn_data[icode].operand[0].mode;
13921 machine_mode mode1 = insn_data[icode].operand[1].mode;
13923 if (icode == CODE_FOR_nothing)
13924 /* Builtin not supported on this processor. */
13925 return 0;
13927 /* If we got invalid arguments bail out before generating bad rtl. */
13928 if (arg0 == error_mark_node || arg1 == error_mark_node)
13929 return const0_rtx;
13931 if (GET_CODE (op0) != CONST_INT
13932 || INTVAL (op0) > 255
13933 || INTVAL (op0) < 0)
13935 error ("argument 1 must be an 8-bit field value");
13936 return const0_rtx;
13939 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13940 op0 = copy_to_mode_reg (mode0, op0);
13942 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13943 op1 = copy_to_mode_reg (mode1, op1);
13945 pat = GEN_FCN (icode) (op0, op1);
13946 if (! pat)
13947 return const0_rtx;
13948 emit_insn (pat);
13950 return NULL_RTX;
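/* Usage sketch (illustrative user code): __builtin_mtfsf (0xff, d) copies
   all eight 4-bit FPSCR fields from the image in the double D; the first
   argument must be a constant that fits in 8 bits, or the error above
   fires.  */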
13953 static rtx
13954 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13956 rtx pat;
13957 tree arg0 = CALL_EXPR_ARG (exp, 0);
13958 rtx op0 = expand_normal (arg0);
13959 machine_mode tmode = insn_data[icode].operand[0].mode;
13960 machine_mode mode0 = insn_data[icode].operand[1].mode;
13962 if (icode == CODE_FOR_nothing)
13963 /* Builtin not supported on this processor. */
13964 return 0;
13966 /* If we got invalid arguments bail out before generating bad rtl. */
13967 if (arg0 == error_mark_node)
13968 return const0_rtx;
13970 if (icode == CODE_FOR_altivec_vspltisb
13971 || icode == CODE_FOR_altivec_vspltish
13972 || icode == CODE_FOR_altivec_vspltisw)
13974 /* Only allow 5-bit *signed* literals. */
13975 if (GET_CODE (op0) != CONST_INT
13976 || INTVAL (op0) > 15
13977 || INTVAL (op0) < -16)
13979 error ("argument 1 must be a 5-bit signed literal");
13980 return CONST0_RTX (tmode);
13984 if (target == 0
13985 || GET_MODE (target) != tmode
13986 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13987 target = gen_reg_rtx (tmode);
13989 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13990 op0 = copy_to_mode_reg (mode0, op0);
13992 pat = GEN_FCN (icode) (target, op0);
13993 if (! pat)
13994 return 0;
13995 emit_insn (pat);
13997 return target;
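/* Usage sketch (illustrative): __builtin_altivec_vspltisw (5) splats the
   literal 5 into every word element of the result; an argument that is
   not a constant in [-16, 15] trips the 5-bit signed literal check
   above.  */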
14000 static rtx
14001 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
14003 rtx pat, scratch1, scratch2;
14004 tree arg0 = CALL_EXPR_ARG (exp, 0);
14005 rtx op0 = expand_normal (arg0);
14006 machine_mode tmode = insn_data[icode].operand[0].mode;
14007 machine_mode mode0 = insn_data[icode].operand[1].mode;
14009 /* If we have invalid arguments, bail out before generating bad rtl. */
14010 if (arg0 == error_mark_node)
14011 return const0_rtx;
14013 if (target == 0
14014 || GET_MODE (target) != tmode
14015 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14016 target = gen_reg_rtx (tmode);
14018 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14019 op0 = copy_to_mode_reg (mode0, op0);
14021 scratch1 = gen_reg_rtx (mode0);
14022 scratch2 = gen_reg_rtx (mode0);
14024 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
14025 if (! pat)
14026 return 0;
14027 emit_insn (pat);
14029 return target;
14032 static rtx
14033 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
14035 rtx pat;
14036 tree arg0 = CALL_EXPR_ARG (exp, 0);
14037 tree arg1 = CALL_EXPR_ARG (exp, 1);
14038 rtx op0 = expand_normal (arg0);
14039 rtx op1 = expand_normal (arg1);
14040 machine_mode tmode = insn_data[icode].operand[0].mode;
14041 machine_mode mode0 = insn_data[icode].operand[1].mode;
14042 machine_mode mode1 = insn_data[icode].operand[2].mode;
14044 if (icode == CODE_FOR_nothing)
14045 /* Builtin not supported on this processor. */
14046 return 0;
14048 /* If we got invalid arguments bail out before generating bad rtl. */
14049 if (arg0 == error_mark_node || arg1 == error_mark_node)
14050 return const0_rtx;
14052 if (icode == CODE_FOR_altivec_vcfux
14053 || icode == CODE_FOR_altivec_vcfsx
14054 || icode == CODE_FOR_altivec_vctsxs
14055 || icode == CODE_FOR_altivec_vctuxs
14056 || icode == CODE_FOR_altivec_vspltb
14057 || icode == CODE_FOR_altivec_vsplth
14058 || icode == CODE_FOR_altivec_vspltw)
14060 /* Only allow 5-bit unsigned literals. */
14061 STRIP_NOPS (arg1);
14062 if (TREE_CODE (arg1) != INTEGER_CST
14063 || TREE_INT_CST_LOW (arg1) & ~0x1f)
14065 error ("argument 2 must be a 5-bit unsigned literal");
14066 return CONST0_RTX (tmode);
14069 else if (icode == CODE_FOR_dfptstsfi_eq_dd
14070 || icode == CODE_FOR_dfptstsfi_lt_dd
14071 || icode == CODE_FOR_dfptstsfi_gt_dd
14072 || icode == CODE_FOR_dfptstsfi_unordered_dd
14073 || icode == CODE_FOR_dfptstsfi_eq_td
14074 || icode == CODE_FOR_dfptstsfi_lt_td
14075 || icode == CODE_FOR_dfptstsfi_gt_td
14076 || icode == CODE_FOR_dfptstsfi_unordered_td)
14078 /* Only allow 6-bit unsigned literals. */
14079 STRIP_NOPS (arg0);
14080 if (TREE_CODE (arg0) != INTEGER_CST
14081 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
14083 error ("argument 1 must be a 6-bit unsigned literal");
14084 return CONST0_RTX (tmode);
14087 else if (icode == CODE_FOR_xststdcqp_kf
14088 || icode == CODE_FOR_xststdcqp_tf
14089 || icode == CODE_FOR_xststdcdp
14090 || icode == CODE_FOR_xststdcsp
14091 || icode == CODE_FOR_xvtstdcdp
14092 || icode == CODE_FOR_xvtstdcsp)
14094 /* Only allow 7-bit unsigned literals. */
14095 STRIP_NOPS (arg1);
14096 if (TREE_CODE (arg1) != INTEGER_CST
14097 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
14099 error ("argument 2 must be a 7-bit unsigned literal");
14100 return CONST0_RTX (tmode);
14103 else if (icode == CODE_FOR_unpackv1ti
14104 || icode == CODE_FOR_unpackkf
14105 || icode == CODE_FOR_unpacktf
14106 || icode == CODE_FOR_unpackif
14107 || icode == CODE_FOR_unpacktd)
14109 /* Only allow 1-bit unsigned literals. */
14110 STRIP_NOPS (arg1);
14111 if (TREE_CODE (arg1) != INTEGER_CST
14112 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
14114 error ("argument 2 must be a 1-bit unsigned literal");
14115 return CONST0_RTX (tmode);
14119 if (target == 0
14120 || GET_MODE (target) != tmode
14121 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14122 target = gen_reg_rtx (tmode);
14124 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14125 op0 = copy_to_mode_reg (mode0, op0);
14126 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14127 op1 = copy_to_mode_reg (mode1, op1);
14129 pat = GEN_FCN (icode) (target, op0, op1);
14130 if (! pat)
14131 return 0;
14132 emit_insn (pat);
14134 return target;
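/* Usage sketch (illustrative): __builtin_altivec_vcfsx (v, 1) converts
   each signed word of V to float scaled by 2**-1.  Because vcfsx encodes
   the scale directly in the instruction, the second argument must be a
   5-bit unsigned literal, as checked above.  */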
14137 static rtx
14138 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
14140 rtx pat, scratch;
14141 tree cr6_form = CALL_EXPR_ARG (exp, 0);
14142 tree arg0 = CALL_EXPR_ARG (exp, 1);
14143 tree arg1 = CALL_EXPR_ARG (exp, 2);
14144 rtx op0 = expand_normal (arg0);
14145 rtx op1 = expand_normal (arg1);
14146 machine_mode tmode = SImode;
14147 machine_mode mode0 = insn_data[icode].operand[1].mode;
14148 machine_mode mode1 = insn_data[icode].operand[2].mode;
14149 int cr6_form_int;
14151 if (TREE_CODE (cr6_form) != INTEGER_CST)
14153 error ("argument 1 of %qs must be a constant",
14154 "__builtin_altivec_predicate");
14155 return const0_rtx;
14157 else
14158 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
14160 gcc_assert (mode0 == mode1);
14162 /* If we have invalid arguments, bail out before generating bad rtl. */
14163 if (arg0 == error_mark_node || arg1 == error_mark_node)
14164 return const0_rtx;
14166 if (target == 0
14167 || GET_MODE (target) != tmode
14168 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14169 target = gen_reg_rtx (tmode);
14171 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14172 op0 = copy_to_mode_reg (mode0, op0);
14173 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14174 op1 = copy_to_mode_reg (mode1, op1);
14176 /* Note that for many of the relevant operations (e.g. cmpne or
14177 cmpeq) with float or double operands, it would make more sense for
14178 the mode of the allocated scratch register to be a vector-of-integer
14179 mode. But the choice to copy the mode of operand 0 was made
14180 long ago and there are no plans to change it. */
14181 scratch = gen_reg_rtx (mode0);
14183 pat = GEN_FCN (icode) (scratch, op0, op1);
14184 if (! pat)
14185 return 0;
14186 emit_insn (pat);
14188 /* The vec_any* and vec_all* predicates use the same opcodes for two
14189 different operations, but the bits in CR6 will be different
14190 depending on what information we want. So we have to play tricks
14191 with CR6 to get the right bits out.
14193 If you think this is disgusting, look at the specs for the
14194 AltiVec predicates. */
14196 switch (cr6_form_int)
14198 case 0:
14199 emit_insn (gen_cr6_test_for_zero (target));
14200 break;
14201 case 1:
14202 emit_insn (gen_cr6_test_for_zero_reverse (target));
14203 break;
14204 case 2:
14205 emit_insn (gen_cr6_test_for_lt (target));
14206 break;
14207 case 3:
14208 emit_insn (gen_cr6_test_for_lt_reverse (target));
14209 break;
14210 default:
14211 error ("argument 1 of %qs is out of range",
14212 "__builtin_altivec_predicate");
14213 break;
14216 return target;
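/* Illustration (macros from altivec.h): vec_all_eq (a, b) on word vectors
   arrives here roughly as __builtin_altivec_vcmpequw_p (__CR6_LT, a, b);
   the compare sets CR6.LT when every element compared equal, so cr6_form 2
   selects gen_cr6_test_for_lt.  vec_any_eq instead passes __CR6_EQ_REV
   (form 1), testing that not all elements compared unequal.  */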
14219 static rtx
14220 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
14222 rtx pat, addr;
14223 tree arg0 = CALL_EXPR_ARG (exp, 0);
14224 tree arg1 = CALL_EXPR_ARG (exp, 1);
14225 machine_mode tmode = insn_data[icode].operand[0].mode;
14226 machine_mode mode0 = Pmode;
14227 machine_mode mode1 = Pmode;
14228 rtx op0 = expand_normal (arg0);
14229 rtx op1 = expand_normal (arg1);
14231 if (icode == CODE_FOR_nothing)
14232 /* Builtin not supported on this processor. */
14233 return 0;
14235 /* If we got invalid arguments bail out before generating bad rtl. */
14236 if (arg0 == error_mark_node || arg1 == error_mark_node)
14237 return const0_rtx;
14239 if (target == 0
14240 || GET_MODE (target) != tmode
14241 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14242 target = gen_reg_rtx (tmode);
14244 op1 = copy_to_mode_reg (mode1, op1);
14246 if (op0 == const0_rtx)
14248 addr = gen_rtx_MEM (tmode, op1);
14250 else
14252 op0 = copy_to_mode_reg (mode0, op0);
14253 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
14256 pat = GEN_FCN (icode) (target, addr);
14258 if (! pat)
14259 return 0;
14260 emit_insn (pat);
14262 return target;
14265 /* Return a constant vector for use as a little-endian permute control vector
14266 to reverse the order of elements of the given vector mode. */
14267 static rtx
14268 swap_selector_for_mode (machine_mode mode)
14270 /* These are little endian vectors, so their elements are reversed
14271 from what you would normally expect for a permute control vector. */
14272 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14273 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14274 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14275 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
14276 unsigned int *swaparray, i;
14277 rtx perm[16];
14279 switch (mode)
14281 case E_V2DFmode:
14282 case E_V2DImode:
14283 swaparray = swap2;
14284 break;
14285 case E_V4SFmode:
14286 case E_V4SImode:
14287 swaparray = swap4;
14288 break;
14289 case E_V8HImode:
14290 swaparray = swap8;
14291 break;
14292 case E_V16QImode:
14293 swaparray = swap16;
14294 break;
14295 default:
14296 gcc_unreachable ();
14299 for (i = 0; i < 16; ++i)
14300 perm[i] = GEN_INT (swaparray[i]);
14302 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
14305 rtx
14306 swap_endian_selector_for_mode (machine_mode mode)
14308 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
14309 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14310 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14311 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14313 unsigned int *swaparray, i;
14314 rtx perm[16];
14316 switch (mode)
14318 case E_V1TImode:
14319 swaparray = swap1;
14320 break;
14321 case E_V2DFmode:
14322 case E_V2DImode:
14323 swaparray = swap2;
14324 break;
14325 case E_V4SFmode:
14326 case E_V4SImode:
14327 swaparray = swap4;
14328 break;
14329 case E_V8HImode:
14330 swaparray = swap8;
14331 break;
14332 default:
14333 gcc_unreachable ();
14336 for (i = 0; i < 16; ++i)
14337 perm[i] = GEN_INT (swaparray[i]);
14339 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
14340 gen_rtvec_v (16, perm)));
14343 /* Generate code for an "lvxl" or "lve*x" built-in for a little endian target
14344 with -maltivec=be specified. Issue the load followed by an element-
14345 reversing permute. */
14346 void
14347 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14349 rtx tmp = gen_reg_rtx (mode);
14350 rtx load = gen_rtx_SET (tmp, op1);
14351 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14352 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
14353 rtx sel = swap_selector_for_mode (mode);
14354 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
14356 gcc_assert (REG_P (op0));
14357 emit_insn (par);
14358 emit_insn (gen_rtx_SET (op0, vperm));
14361 /* Generate code for a "stvxl" built-in for a little endian target with
14362 -maltivec=be specified. Issue the store preceded by an element-reversing
14363 permute. */
14364 void
14365 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14367 rtx tmp = gen_reg_rtx (mode);
14368 rtx store = gen_rtx_SET (op0, tmp);
14369 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14370 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
14371 rtx sel = swap_selector_for_mode (mode);
14372 rtx vperm;
14374 gcc_assert (REG_P (op1));
14375 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14376 emit_insn (gen_rtx_SET (tmp, vperm));
14377 emit_insn (par);
14380 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
14381 specified. Issue the store preceded by an element-reversing permute. */
14382 void
14383 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14385 machine_mode inner_mode = GET_MODE_INNER (mode);
14386 rtx tmp = gen_reg_rtx (mode);
14387 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
14388 rtx sel = swap_selector_for_mode (mode);
14389 rtx vperm;
14391 gcc_assert (REG_P (op1));
14392 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14393 emit_insn (gen_rtx_SET (tmp, vperm));
14394 emit_insn (gen_rtx_SET (op0, stvx));
14397 static rtx
14398 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14400 rtx pat, addr;
14401 tree arg0 = CALL_EXPR_ARG (exp, 0);
14402 tree arg1 = CALL_EXPR_ARG (exp, 1);
14403 machine_mode tmode = insn_data[icode].operand[0].mode;
14404 machine_mode mode0 = Pmode;
14405 machine_mode mode1 = Pmode;
14406 rtx op0 = expand_normal (arg0);
14407 rtx op1 = expand_normal (arg1);
14409 if (icode == CODE_FOR_nothing)
14410 /* Builtin not supported on this processor. */
14411 return 0;
14413 /* If we got invalid arguments bail out before generating bad rtl. */
14414 if (arg0 == error_mark_node || arg1 == error_mark_node)
14415 return const0_rtx;
14417 if (target == 0
14418 || GET_MODE (target) != tmode
14419 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14420 target = gen_reg_rtx (tmode);
14422 op1 = copy_to_mode_reg (mode1, op1);
14424 /* For LVX, express the RTL accurately by ANDing the address with -16.
14425 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14426 so the raw address is fine. */
14427 if (icode == CODE_FOR_altivec_lvx_v2df_2op
14428 || icode == CODE_FOR_altivec_lvx_v2di_2op
14429 || icode == CODE_FOR_altivec_lvx_v4sf_2op
14430 || icode == CODE_FOR_altivec_lvx_v4si_2op
14431 || icode == CODE_FOR_altivec_lvx_v8hi_2op
14432 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
14434 rtx rawaddr;
14435 if (op0 == const0_rtx)
14436 rawaddr = op1;
14437 else
14439 op0 = copy_to_mode_reg (mode0, op0);
14440 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
14442 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14443 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
14445 /* For -maltivec=be, emit the load and follow it up with a
14446 permute to swap the elements. */
14447 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14449 rtx temp = gen_reg_rtx (tmode);
14450 emit_insn (gen_rtx_SET (temp, addr));
14452 rtx sel = swap_selector_for_mode (tmode);
14453 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
14454 UNSPEC_VPERM);
14455 emit_insn (gen_rtx_SET (target, vperm));
14457 else
14458 emit_insn (gen_rtx_SET (target, addr));
14460 else
14462 if (op0 == const0_rtx)
14463 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14464 else
14466 op0 = copy_to_mode_reg (mode0, op0);
14467 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14468 gen_rtx_PLUS (Pmode, op1, op0));
14471 pat = GEN_FCN (icode) (target, addr);
14472 if (! pat)
14473 return 0;
14474 emit_insn (pat);
14477 return target;
14480 static rtx
14481 paired_expand_stv_builtin (enum insn_code icode, tree exp)
14483 tree arg0 = CALL_EXPR_ARG (exp, 0);
14484 tree arg1 = CALL_EXPR_ARG (exp, 1);
14485 tree arg2 = CALL_EXPR_ARG (exp, 2);
14486 rtx op0 = expand_normal (arg0);
14487 rtx op1 = expand_normal (arg1);
14488 rtx op2 = expand_normal (arg2);
14489 rtx pat, addr;
14490 machine_mode tmode = insn_data[icode].operand[0].mode;
14491 machine_mode mode1 = Pmode;
14492 machine_mode mode2 = Pmode;
14494 /* Invalid arguments. Bail before doing anything stoopid! */
14495 if (arg0 == error_mark_node
14496 || arg1 == error_mark_node
14497 || arg2 == error_mark_node)
14498 return const0_rtx;
14500 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
14501 op0 = copy_to_mode_reg (tmode, op0);
14503 op2 = copy_to_mode_reg (mode2, op2);
14505 if (op1 == const0_rtx)
14507 addr = gen_rtx_MEM (tmode, op2);
14509 else
14511 op1 = copy_to_mode_reg (mode1, op1);
14512 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
14515 pat = GEN_FCN (icode) (addr, op0);
14516 if (pat)
14517 emit_insn (pat);
14518 return NULL_RTX;
14521 static rtx
14522 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
14524 rtx pat;
14525 tree arg0 = CALL_EXPR_ARG (exp, 0);
14526 tree arg1 = CALL_EXPR_ARG (exp, 1);
14527 tree arg2 = CALL_EXPR_ARG (exp, 2);
14528 rtx op0 = expand_normal (arg0);
14529 rtx op1 = expand_normal (arg1);
14530 rtx op2 = expand_normal (arg2);
14531 machine_mode mode0 = insn_data[icode].operand[0].mode;
14532 machine_mode mode1 = insn_data[icode].operand[1].mode;
14533 machine_mode mode2 = insn_data[icode].operand[2].mode;
14535 if (icode == CODE_FOR_nothing)
14536 /* Builtin not supported on this processor. */
14537 return NULL_RTX;
14539 /* If we got invalid arguments, bail out before generating bad rtl. */
14540 if (arg0 == error_mark_node
14541 || arg1 == error_mark_node
14542 || arg2 == error_mark_node)
14543 return NULL_RTX;
14545 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14546 op0 = copy_to_mode_reg (mode0, op0);
14547 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14548 op1 = copy_to_mode_reg (mode1, op1);
14549 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14550 op2 = copy_to_mode_reg (mode2, op2);
14552 pat = GEN_FCN (icode) (op0, op1, op2);
14553 if (pat)
14554 emit_insn (pat);
14556 return NULL_RTX;
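/* A minimal usage sketch for the length-controlled store expanded above,
   assuming the Power9 vec_xst_len interface from altivec.h; only the first
   N bytes of the vector are stored:

     #include <stddef.h>
     #include <altivec.h>

     void
     store_prefix (vector unsigned char v, unsigned char *p, size_t n)
     {
       vec_xst_len (v, p, n);
     }

   The expander itself only coerces the three operands to satisfy the insn
   predicates and emits the stxvl (or xst_len_r) pattern.  */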
14559 static rtx
14560 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14562 tree arg0 = CALL_EXPR_ARG (exp, 0);
14563 tree arg1 = CALL_EXPR_ARG (exp, 1);
14564 tree arg2 = CALL_EXPR_ARG (exp, 2);
14565 rtx op0 = expand_normal (arg0);
14566 rtx op1 = expand_normal (arg1);
14567 rtx op2 = expand_normal (arg2);
14568 rtx pat, addr, rawaddr;
14569 machine_mode tmode = insn_data[icode].operand[0].mode;
14570 machine_mode smode = insn_data[icode].operand[1].mode;
14571 machine_mode mode1 = Pmode;
14572 machine_mode mode2 = Pmode;
14574 /* Invalid arguments; bail out before generating bad rtl. */
14575 if (arg0 == error_mark_node
14576 || arg1 == error_mark_node
14577 || arg2 == error_mark_node)
14578 return const0_rtx;
14580 op2 = copy_to_mode_reg (mode2, op2);
14582 /* For STVX, express the RTL accurately by ANDing the address with -16.
14583 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14584 so the raw address is fine. */
14585 if (icode == CODE_FOR_altivec_stvx_v2df_2op
14586 || icode == CODE_FOR_altivec_stvx_v2di_2op
14587 || icode == CODE_FOR_altivec_stvx_v4sf_2op
14588 || icode == CODE_FOR_altivec_stvx_v4si_2op
14589 || icode == CODE_FOR_altivec_stvx_v8hi_2op
14590 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
14592 if (op1 == const0_rtx)
14593 rawaddr = op2;
14594 else
14596 op1 = copy_to_mode_reg (mode1, op1);
14597 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14600 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14601 addr = gen_rtx_MEM (tmode, addr);
14603 op0 = copy_to_mode_reg (tmode, op0);
14605 /* For -maltivec=be, emit a permute to swap the elements, followed
14606 by the store. */
14607 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14609 rtx temp = gen_reg_rtx (tmode);
14610 rtx sel = swap_selector_for_mode (tmode);
14611 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
14612 UNSPEC_VPERM);
14613 emit_insn (gen_rtx_SET (temp, vperm));
14614 emit_insn (gen_rtx_SET (addr, temp));
14616 else
14617 emit_insn (gen_rtx_SET (addr, op0));
14619 else
14621 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14622 op0 = copy_to_mode_reg (smode, op0);
14624 if (op1 == const0_rtx)
14625 addr = gen_rtx_MEM (tmode, op2);
14626 else
14628 op1 = copy_to_mode_reg (mode1, op1);
14629 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14632 pat = GEN_FCN (icode) (addr, op0);
14633 if (pat)
14634 emit_insn (pat);
14637 return NULL_RTX;
14640 /* Return the appropriate SPR number associated with the given builtin. */
14641 static inline HOST_WIDE_INT
14642 htm_spr_num (enum rs6000_builtins code)
14644 if (code == HTM_BUILTIN_GET_TFHAR
14645 || code == HTM_BUILTIN_SET_TFHAR)
14646 return TFHAR_SPR;
14647 else if (code == HTM_BUILTIN_GET_TFIAR
14648 || code == HTM_BUILTIN_SET_TFIAR)
14649 return TFIAR_SPR;
14650 else if (code == HTM_BUILTIN_GET_TEXASR
14651 || code == HTM_BUILTIN_SET_TEXASR)
14652 return TEXASR_SPR;
14653 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14654 || code == HTM_BUILTIN_SET_TEXASRU);
14655 return TEXASRU_SPR;
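/* For example, both accessors of the TFHAR register map to the same SPR
   number returned above (user-level spellings per the HTM built-in
   documentation):

     __builtin_get_tfhar ()     reads  SPR TFHAR_SPR  (mfspr)
     __builtin_set_tfhar (x)    writes SPR TFHAR_SPR  (mtspr)

   and likewise for TFIAR, TEXASR and TEXASRU.  */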
14658 /* Return the appropriate SPR regno associated with the given builtin. */
14659 static inline HOST_WIDE_INT
14660 htm_spr_regno (enum rs6000_builtins code)
14662 if (code == HTM_BUILTIN_GET_TFHAR
14663 || code == HTM_BUILTIN_SET_TFHAR)
14664 return TFHAR_REGNO;
14665 else if (code == HTM_BUILTIN_GET_TFIAR
14666 || code == HTM_BUILTIN_SET_TFIAR)
14667 return TFIAR_REGNO;
14668 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14669 || code == HTM_BUILTIN_SET_TEXASR
14670 || code == HTM_BUILTIN_GET_TEXASRU
14671 || code == HTM_BUILTIN_SET_TEXASRU);
14672 return TEXASR_REGNO;
14675 /* Return the correct ICODE value depending on whether we are
14676 setting or reading the HTM SPRs. */
14677 static inline enum insn_code
14678 rs6000_htm_spr_icode (bool nonvoid)
14680 if (nonvoid)
14681 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14682 else
14683 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14686 /* Expand the HTM builtin in EXP and store the result in TARGET.
14687 Store true in *EXPANDEDP if we found a builtin to expand. */
14688 static rtx
14689 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14691 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14692 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14693 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14694 const struct builtin_description *d;
14695 size_t i;
14697 *expandedp = true;
14699 if (!TARGET_POWERPC64
14700 && (fcode == HTM_BUILTIN_TABORTDC
14701 || fcode == HTM_BUILTIN_TABORTDCI))
14703 size_t uns_fcode = (size_t)fcode;
14704 const char *name = rs6000_builtin_info[uns_fcode].name;
14705 error ("builtin %qs is only valid in 64-bit mode", name);
14706 return const0_rtx;
14709 /* Expand the HTM builtins. */
14710 d = bdesc_htm;
14711 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14712 if (d->code == fcode)
14714 rtx op[MAX_HTM_OPERANDS], pat;
14715 int nopnds = 0;
14716 tree arg;
14717 call_expr_arg_iterator iter;
14718 unsigned attr = rs6000_builtin_info[fcode].attr;
14719 enum insn_code icode = d->icode;
14720 const struct insn_operand_data *insn_op;
14721 bool uses_spr = (attr & RS6000_BTC_SPR);
14722 rtx cr = NULL_RTX;
14724 if (uses_spr)
14725 icode = rs6000_htm_spr_icode (nonvoid);
14726 insn_op = &insn_data[icode].operand[0];
14728 if (nonvoid)
14730 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14731 if (!target
14732 || GET_MODE (target) != tmode
14733 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14734 target = gen_reg_rtx (tmode);
14735 if (uses_spr)
14736 op[nopnds++] = target;
14739 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14741 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14742 return const0_rtx;
14744 insn_op = &insn_data[icode].operand[nopnds];
14746 op[nopnds] = expand_normal (arg);
14748 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14750 if (!strcmp (insn_op->constraint, "n"))
14752 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14753 if (!CONST_INT_P (op[nopnds]))
14754 error ("argument %d must be an unsigned literal", arg_num);
14755 else
14756 error ("argument %d is an unsigned literal that is "
14757 "out of range", arg_num);
14758 return const0_rtx;
14760 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14763 nopnds++;
14766 /* Handle the builtins for extended mnemonics. These accept
14767 no arguments, but map to builtins that take arguments. */
14768 switch (fcode)
14770 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14771 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14772 op[nopnds++] = GEN_INT (1);
14773 if (flag_checking)
14774 attr |= RS6000_BTC_UNARY;
14775 break;
14776 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14777 op[nopnds++] = GEN_INT (0);
14778 if (flag_checking)
14779 attr |= RS6000_BTC_UNARY;
14780 break;
14781 default:
14782 break;
14785 /* If this builtin accesses SPRs, then pass in the appropriate
14786 SPR number and SPR regno as the last two operands. */
14787 if (uses_spr)
14789 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14790 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14791 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14793 /* If this builtin accesses a CR, then pass in a scratch
14794 CR as the last operand. */
14795 else if (attr & RS6000_BTC_CR)
14796 { cr = gen_reg_rtx (CCmode);
14797 op[nopnds++] = cr;
14800 if (flag_checking)
14802 int expected_nopnds = 0;
14803 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14804 expected_nopnds = 1;
14805 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14806 expected_nopnds = 2;
14807 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14808 expected_nopnds = 3;
14809 if (!(attr & RS6000_BTC_VOID))
14810 expected_nopnds += 1;
14811 if (uses_spr)
14812 expected_nopnds += 2;
14814 gcc_assert (nopnds == expected_nopnds
14815 && nopnds <= MAX_HTM_OPERANDS);
14818 switch (nopnds)
14820 case 1:
14821 pat = GEN_FCN (icode) (op[0]);
14822 break;
14823 case 2:
14824 pat = GEN_FCN (icode) (op[0], op[1]);
14825 break;
14826 case 3:
14827 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14828 break;
14829 case 4:
14830 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14831 break;
14832 default:
14833 gcc_unreachable ();
14835 if (!pat)
14836 return NULL_RTX;
14837 emit_insn (pat);
14839 if (attr & RS6000_BTC_CR)
14841 if (fcode == HTM_BUILTIN_TBEGIN)
14843 /* Emit code to set TARGET to true or false depending on
14844 whether the tbegin. instruction succeeded or failed
14845 to start a transaction. We do this by placing the 1's
14846 complement of CR's EQ bit into TARGET. */
14847 rtx scratch = gen_reg_rtx (SImode);
14848 emit_insn (gen_rtx_SET (scratch,
14849 gen_rtx_EQ (SImode, cr,
14850 const0_rtx)));
14851 emit_insn (gen_rtx_SET (target,
14852 gen_rtx_XOR (SImode, scratch,
14853 GEN_INT (1))));
14855 else
14857 /* Emit code to copy the 4-bit condition register field
14858 CR into the least significant end of register TARGET. */
14859 rtx scratch1 = gen_reg_rtx (SImode);
14860 rtx scratch2 = gen_reg_rtx (SImode);
14861 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14862 emit_insn (gen_movcc (subreg, cr));
14863 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14864 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14868 if (nonvoid)
14869 return target;
14870 return const0_rtx;
14873 *expandedp = false;
14874 return NULL_RTX;
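/* A minimal usage sketch of the CR handling above, assuming GCC's HTM
   built-in interface: __builtin_tbegin returns nonzero when the transaction
   starts, which is exactly the complemented EQ bit that the tbegin case
   places into TARGET.

     int
     try_increment (int *counter)
     {
       if (__builtin_tbegin (0))
         {
           ++*counter;
           __builtin_tend (0);
           return 1;
         }
       return 0;
     }
*/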
14877 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14879 static rtx
14880 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14881 rtx target)
14883 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14884 if (fcode == RS6000_BUILTIN_CPU_INIT)
14885 return const0_rtx;
14887 if (target == 0 || GET_MODE (target) != SImode)
14888 target = gen_reg_rtx (SImode);
14890 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14891 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14892 /* Target clones creates an ARRAY_REF instead of a STRING_CST; convert
14893 it back to a STRING_CST. */
14894 if (TREE_CODE (arg) == ARRAY_REF
14895 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14896 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14897 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14898 arg = TREE_OPERAND (arg, 0);
14900 if (TREE_CODE (arg) != STRING_CST)
14902 error ("builtin %qs only accepts a string argument",
14903 rs6000_builtin_info[(size_t) fcode].name);
14904 return const0_rtx;
14907 if (fcode == RS6000_BUILTIN_CPU_IS)
14909 const char *cpu = TREE_STRING_POINTER (arg);
14910 rtx cpuid = NULL_RTX;
14911 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14912 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14914 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14915 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14916 break;
14918 if (cpuid == NULL_RTX)
14920 /* Invalid CPU argument. */
14921 error ("cpu %qs is an invalid argument to builtin %qs",
14922 cpu, rs6000_builtin_info[(size_t) fcode].name);
14923 return const0_rtx;
14926 rtx platform = gen_reg_rtx (SImode);
14927 rtx tcbmem = gen_const_mem (SImode,
14928 gen_rtx_PLUS (Pmode,
14929 gen_rtx_REG (Pmode, TLS_REGNUM),
14930 GEN_INT (TCB_PLATFORM_OFFSET)));
14931 emit_move_insn (platform, tcbmem);
14932 emit_insn (gen_eqsi3 (target, platform, cpuid));
14934 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14936 const char *hwcap = TREE_STRING_POINTER (arg);
14937 rtx mask = NULL_RTX;
14938 int hwcap_offset;
14939 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14940 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14942 mask = GEN_INT (cpu_supports_info[i].mask);
14943 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14944 break;
14946 if (mask == NULL_RTX)
14948 /* Invalid HWCAP argument. */
14949 error ("%s %qs is an invalid argument to builtin %qs",
14950 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14951 return const0_rtx;
14954 rtx tcb_hwcap = gen_reg_rtx (SImode);
14955 rtx tcbmem = gen_const_mem (SImode,
14956 gen_rtx_PLUS (Pmode,
14957 gen_rtx_REG (Pmode, TLS_REGNUM),
14958 GEN_INT (hwcap_offset)));
14959 emit_move_insn (tcb_hwcap, tcbmem);
14960 rtx scratch1 = gen_reg_rtx (SImode);
14961 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14962 rtx scratch2 = gen_reg_rtx (SImode);
14963 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14964 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14966 else
14967 gcc_unreachable ();
14969 /* Record that we have expanded a CPU builtin, so that we can later
14970 emit a reference to the special symbol exported by LIBC to ensure we
14971 do not link against an old LIBC that doesn't support this feature. */
14972 cpu_builtin_p = true;
14974 #else
14975 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14976 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14978 /* For old LIBCs, always return FALSE. */
14979 emit_move_insn (target, GEN_INT (0));
14980 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14982 return target;
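/* A minimal usage sketch, assuming a glibc (2.23 or newer) that fills in
   the TCB fields read above:

     int
     pick_path (void)
     {
       __builtin_cpu_init ();
       if (__builtin_cpu_is ("power9"))
         return 9;
       if (__builtin_cpu_supports ("vsx"))
         return 1;
       return 0;
     }

   Both predicates compile to a TCB load plus a compare or mask test; no
   library call is made at run time.  */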
14985 static rtx
14986 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14988 rtx pat;
14989 tree arg0 = CALL_EXPR_ARG (exp, 0);
14990 tree arg1 = CALL_EXPR_ARG (exp, 1);
14991 tree arg2 = CALL_EXPR_ARG (exp, 2);
14992 rtx op0 = expand_normal (arg0);
14993 rtx op1 = expand_normal (arg1);
14994 rtx op2 = expand_normal (arg2);
14995 machine_mode tmode = insn_data[icode].operand[0].mode;
14996 machine_mode mode0 = insn_data[icode].operand[1].mode;
14997 machine_mode mode1 = insn_data[icode].operand[2].mode;
14998 machine_mode mode2 = insn_data[icode].operand[3].mode;
15000 if (icode == CODE_FOR_nothing)
15001 /* Builtin not supported on this processor. */
15002 return 0;
15004 /* If we got invalid arguments, bail out before generating bad rtl. */
15005 if (arg0 == error_mark_node
15006 || arg1 == error_mark_node
15007 || arg2 == error_mark_node)
15008 return const0_rtx;
15010 /* Check and prepare the argument depending on the instruction code.
15012 Note that a switch statement instead of the sequence of tests
15013 would be incorrect, as many of the CODE_FOR values could be
15014 CODE_FOR_nothing, and that would yield multiple case labels
15015 with identical values. We would never reach here at runtime
15016 in that situation anyway. */
15017 if (icode == CODE_FOR_altivec_vsldoi_v4sf
15018 || icode == CODE_FOR_altivec_vsldoi_v2df
15019 || icode == CODE_FOR_altivec_vsldoi_v4si
15020 || icode == CODE_FOR_altivec_vsldoi_v8hi
15021 || icode == CODE_FOR_altivec_vsldoi_v16qi)
15023 /* Only allow 4-bit unsigned literals. */
15024 STRIP_NOPS (arg2);
15025 if (TREE_CODE (arg2) != INTEGER_CST
15026 || TREE_INT_CST_LOW (arg2) & ~0xf)
15028 error ("argument 3 must be a 4-bit unsigned literal");
15029 return CONST0_RTX (tmode);
15032 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
15033 || icode == CODE_FOR_vsx_xxpermdi_v2di
15034 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
15035 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
15036 || icode == CODE_FOR_vsx_xxpermdi_v1ti
15037 || icode == CODE_FOR_vsx_xxpermdi_v4sf
15038 || icode == CODE_FOR_vsx_xxpermdi_v4si
15039 || icode == CODE_FOR_vsx_xxpermdi_v8hi
15040 || icode == CODE_FOR_vsx_xxpermdi_v16qi
15041 || icode == CODE_FOR_vsx_xxsldwi_v16qi
15042 || icode == CODE_FOR_vsx_xxsldwi_v8hi
15043 || icode == CODE_FOR_vsx_xxsldwi_v4si
15044 || icode == CODE_FOR_vsx_xxsldwi_v4sf
15045 || icode == CODE_FOR_vsx_xxsldwi_v2di
15046 || icode == CODE_FOR_vsx_xxsldwi_v2df)
15048 /* Only allow 2-bit unsigned literals. */
15049 STRIP_NOPS (arg2);
15050 if (TREE_CODE (arg2) != INTEGER_CST
15051 || TREE_INT_CST_LOW (arg2) & ~0x3)
15053 error ("argument 3 must be a 2-bit unsigned literal");
15054 return CONST0_RTX (tmode);
15057 else if (icode == CODE_FOR_vsx_set_v2df
15058 || icode == CODE_FOR_vsx_set_v2di
15059 || icode == CODE_FOR_bcdadd
15060 || icode == CODE_FOR_bcdadd_lt
15061 || icode == CODE_FOR_bcdadd_eq
15062 || icode == CODE_FOR_bcdadd_gt
15063 || icode == CODE_FOR_bcdsub
15064 || icode == CODE_FOR_bcdsub_lt
15065 || icode == CODE_FOR_bcdsub_eq
15066 || icode == CODE_FOR_bcdsub_gt)
15068 /* Only allow 1-bit unsigned literals. */
15069 STRIP_NOPS (arg2);
15070 if (TREE_CODE (arg2) != INTEGER_CST
15071 || TREE_INT_CST_LOW (arg2) & ~0x1)
15073 error ("argument 3 must be a 1-bit unsigned literal");
15074 return CONST0_RTX (tmode);
15077 else if (icode == CODE_FOR_dfp_ddedpd_dd
15078 || icode == CODE_FOR_dfp_ddedpd_td)
15080 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15081 STRIP_NOPS (arg0);
15082 if (TREE_CODE (arg0) != INTEGER_CST
15083 || TREE_INT_CST_LOW (arg0) & ~0x3)
15085 error ("argument 1 must be 0 or 2");
15086 return CONST0_RTX (tmode);
15089 else if (icode == CODE_FOR_dfp_denbcd_dd
15090 || icode == CODE_FOR_dfp_denbcd_td)
15092 /* Only allow 1-bit unsigned literals. */
15093 STRIP_NOPS (arg0);
15094 if (TREE_CODE (arg0) != INTEGER_CST
15095 || TREE_INT_CST_LOW (arg0) & ~0x1)
15097 error ("argument 1 must be a 1-bit unsigned literal");
15098 return CONST0_RTX (tmode);
15101 else if (icode == CODE_FOR_dfp_dscli_dd
15102 || icode == CODE_FOR_dfp_dscli_td
15103 || icode == CODE_FOR_dfp_dscri_dd
15104 || icode == CODE_FOR_dfp_dscri_td)
15106 /* Only allow 6-bit unsigned literals. */
15107 STRIP_NOPS (arg1);
15108 if (TREE_CODE (arg1) != INTEGER_CST
15109 || TREE_INT_CST_LOW (arg1) & ~0x3f)
15111 error ("argument 2 must be a 6-bit unsigned literal");
15112 return CONST0_RTX (tmode);
15115 else if (icode == CODE_FOR_crypto_vshasigmaw
15116 || icode == CODE_FOR_crypto_vshasigmad)
15118 /* Check whether the 2nd and 3rd arguments are integer constants in
15119 range, and prepare the arguments. */
15120 STRIP_NOPS (arg1);
15121 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
15123 error ("argument 2 must be 0 or 1");
15124 return CONST0_RTX (tmode);
15127 STRIP_NOPS (arg2);
15128 if (TREE_CODE (arg2) != INTEGER_CST
15129 || wi::geu_p (wi::to_wide (arg2), 16))
15131 error ("argument 3 must be in the range 0..15");
15132 return CONST0_RTX (tmode);
15136 if (target == 0
15137 || GET_MODE (target) != tmode
15138 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15139 target = gen_reg_rtx (tmode);
15141 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15142 op0 = copy_to_mode_reg (mode0, op0);
15143 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15144 op1 = copy_to_mode_reg (mode1, op1);
15145 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15146 op2 = copy_to_mode_reg (mode2, op2);
15148 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
15149 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
15150 else
15151 pat = GEN_FCN (icode) (target, op0, op1, op2);
15152 if (! pat)
15153 return 0;
15154 emit_insn (pat);
15156 return target;
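/* For instance, the vsldoi case above enforces its 4-bit literal at compile
   time.  A conforming call, assuming the altivec.h interface:

     #include <altivec.h>

     vector int
     shift_concat (vector int a, vector int b)
     {
       return vec_sld (a, b, 3);
     }

   whereas vec_sld (a, b, 17) is rejected with "argument 3 must be a 4-bit
   unsigned literal".  */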
15159 /* Expand the lvx builtins. */
15160 static rtx
15161 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
15163 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15164 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15165 tree arg0;
15166 machine_mode tmode, mode0;
15167 rtx pat, op0;
15168 enum insn_code icode;
15170 switch (fcode)
15172 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
15173 icode = CODE_FOR_vector_altivec_load_v16qi;
15174 break;
15175 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
15176 icode = CODE_FOR_vector_altivec_load_v8hi;
15177 break;
15178 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
15179 icode = CODE_FOR_vector_altivec_load_v4si;
15180 break;
15181 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
15182 icode = CODE_FOR_vector_altivec_load_v4sf;
15183 break;
15184 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
15185 icode = CODE_FOR_vector_altivec_load_v2df;
15186 break;
15187 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
15188 icode = CODE_FOR_vector_altivec_load_v2di;
15189 break;
15190 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
15191 icode = CODE_FOR_vector_altivec_load_v1ti;
15192 break;
15193 default:
15194 *expandedp = false;
15195 return NULL_RTX;
15198 *expandedp = true;
15200 arg0 = CALL_EXPR_ARG (exp, 0);
15201 op0 = expand_normal (arg0);
15202 tmode = insn_data[icode].operand[0].mode;
15203 mode0 = insn_data[icode].operand[1].mode;
15205 if (target == 0
15206 || GET_MODE (target) != tmode
15207 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15208 target = gen_reg_rtx (tmode);
15210 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15211 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15213 pat = GEN_FCN (icode) (target, op0);
15214 if (! pat)
15215 return 0;
15216 emit_insn (pat);
15217 return target;
15220 /* Expand the stvx builtins. */
15221 static rtx
15222 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15223 bool *expandedp)
15225 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15226 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15227 tree arg0, arg1;
15228 machine_mode mode0, mode1;
15229 rtx pat, op0, op1;
15230 enum insn_code icode;
15232 switch (fcode)
15234 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
15235 icode = CODE_FOR_vector_altivec_store_v16qi;
15236 break;
15237 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
15238 icode = CODE_FOR_vector_altivec_store_v8hi;
15239 break;
15240 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
15241 icode = CODE_FOR_vector_altivec_store_v4si;
15242 break;
15243 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
15244 icode = CODE_FOR_vector_altivec_store_v4sf;
15245 break;
15246 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
15247 icode = CODE_FOR_vector_altivec_store_v2df;
15248 break;
15249 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
15250 icode = CODE_FOR_vector_altivec_store_v2di;
15251 break;
15252 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
15253 icode = CODE_FOR_vector_altivec_store_v1ti;
15254 break;
15255 default:
15256 *expandedp = false;
15257 return NULL_RTX;
15260 arg0 = CALL_EXPR_ARG (exp, 0);
15261 arg1 = CALL_EXPR_ARG (exp, 1);
15262 op0 = expand_normal (arg0);
15263 op1 = expand_normal (arg1);
15264 mode0 = insn_data[icode].operand[0].mode;
15265 mode1 = insn_data[icode].operand[1].mode;
15267 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15268 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15269 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15270 op1 = copy_to_mode_reg (mode1, op1);
15272 pat = GEN_FCN (icode) (op0, op1);
15273 if (pat)
15274 emit_insn (pat);
15276 *expandedp = true;
15277 return NULL_RTX;
15280 /* Expand the dst builtins. */
15281 static rtx
15282 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15283 bool *expandedp)
15285 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15286 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15287 tree arg0, arg1, arg2;
15288 machine_mode mode0, mode1;
15289 rtx pat, op0, op1, op2;
15290 const struct builtin_description *d;
15291 size_t i;
15293 *expandedp = false;
15295 /* Handle DST variants. */
15296 d = bdesc_dst;
15297 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
15298 if (d->code == fcode)
15300 arg0 = CALL_EXPR_ARG (exp, 0);
15301 arg1 = CALL_EXPR_ARG (exp, 1);
15302 arg2 = CALL_EXPR_ARG (exp, 2);
15303 op0 = expand_normal (arg0);
15304 op1 = expand_normal (arg1);
15305 op2 = expand_normal (arg2);
15306 mode0 = insn_data[d->icode].operand[0].mode;
15307 mode1 = insn_data[d->icode].operand[1].mode;
15309 /* Invalid arguments, bail out before generating bad rtl. */
15310 if (arg0 == error_mark_node
15311 || arg1 == error_mark_node
15312 || arg2 == error_mark_node)
15313 return const0_rtx;
15315 *expandedp = true;
15316 STRIP_NOPS (arg2);
15317 if (TREE_CODE (arg2) != INTEGER_CST
15318 || TREE_INT_CST_LOW (arg2) & ~0x3)
15320 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
15321 return const0_rtx;
15324 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15325 op0 = copy_to_mode_reg (Pmode, op0);
15326 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15327 op1 = copy_to_mode_reg (mode1, op1);
15329 pat = GEN_FCN (d->icode) (op0, op1, op2);
15330 if (pat != 0)
15331 emit_insn (pat);
15333 return NULL_RTX;
15336 return NULL_RTX;
15339 /* Expand vec_init builtin. */
15340 static rtx
15341 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
15343 machine_mode tmode = TYPE_MODE (type);
15344 machine_mode inner_mode = GET_MODE_INNER (tmode);
15345 int i, n_elt = GET_MODE_NUNITS (tmode);
15347 gcc_assert (VECTOR_MODE_P (tmode));
15348 gcc_assert (n_elt == call_expr_nargs (exp));
15350 if (!target || !register_operand (target, tmode))
15351 target = gen_reg_rtx (tmode);
15353 /* If we have a vector composed of a single element, such as V1TImode, do
15354 the initialization directly. */
15355 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
15357 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
15358 emit_move_insn (target, gen_lowpart (tmode, x));
15360 else
15362 rtvec v = rtvec_alloc (n_elt);
15364 for (i = 0; i < n_elt; ++i)
15366 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
15367 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15370 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
15373 return target;
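/* In the multi-element case above, the PARALLEL handed to
   rs6000_expand_vector_init carries one lowpart-cast operand per lane;
   e.g. for V4SImode it holds four SImode values.  That routine then
   chooses between a single load for constant elements and per-lane
   insertion for variable ones.  */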
15376 /* Return the integer constant in ARG. Constrain it to be in the range
15377 of the subparts of VEC_TYPE; issue an error if not. */
15379 static int
15380 get_element_number (tree vec_type, tree arg)
15382 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15384 if (!tree_fits_uhwi_p (arg)
15385 || (elt = tree_to_uhwi (arg), elt > max))
15387 error ("selector must be an integer constant in the range 0..%wi", max);
15388 return 0;
15391 return elt;
15394 /* Expand vec_set builtin. */
15395 static rtx
15396 altivec_expand_vec_set_builtin (tree exp)
15398 machine_mode tmode, mode1;
15399 tree arg0, arg1, arg2;
15400 int elt;
15401 rtx op0, op1;
15403 arg0 = CALL_EXPR_ARG (exp, 0);
15404 arg1 = CALL_EXPR_ARG (exp, 1);
15405 arg2 = CALL_EXPR_ARG (exp, 2);
15407 tmode = TYPE_MODE (TREE_TYPE (arg0));
15408 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15409 gcc_assert (VECTOR_MODE_P (tmode));
15411 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
15412 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
15413 elt = get_element_number (TREE_TYPE (arg0), arg2);
15415 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15416 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15418 op0 = force_reg (tmode, op0);
15419 op1 = force_reg (mode1, op1);
15421 rs6000_expand_vector_set (op0, op1, elt);
15423 return op0;
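/* A minimal usage sketch, assuming the altivec.h vec_insert interface:

     #include <altivec.h>

     vector int
     set_lane (vector int v, int x)
     {
       return vec_insert (x, v, 2);
     }

   Note that the expander returns OP0 itself: the element is inserted into
   the vector's register and that register is the result.  */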
15426 /* Expand vec_ext builtin. */
15427 static rtx
15428 altivec_expand_vec_ext_builtin (tree exp, rtx target)
15430 machine_mode tmode, mode0;
15431 tree arg0, arg1;
15432 rtx op0;
15433 rtx op1;
15435 arg0 = CALL_EXPR_ARG (exp, 0);
15436 arg1 = CALL_EXPR_ARG (exp, 1);
15438 op0 = expand_normal (arg0);
15439 op1 = expand_normal (arg1);
15441 /* Call get_element_number to validate arg1 if it is a constant. */
15442 if (TREE_CODE (arg1) == INTEGER_CST)
15443 (void) get_element_number (TREE_TYPE (arg0), arg1);
15445 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15446 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15447 gcc_assert (VECTOR_MODE_P (mode0));
15449 op0 = force_reg (mode0, op0);
15451 if (optimize || !target || !register_operand (target, tmode))
15452 target = gen_reg_rtx (tmode);
15454 rs6000_expand_vector_extract (target, op0, op1);
15456 return target;
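/* A minimal usage sketch, assuming the altivec.h vec_extract interface:

     #include <altivec.h>

     int
     get_lane (vector int v)
     {
       return vec_extract (v, 3);
     }

   A variable selector is also accepted; rs6000_expand_vector_extract then
   emits a variable-extract sequence instead of a direct lane access.  */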
15459 /* Expand the builtin in EXP and store the result in TARGET. Store
15460 true in *EXPANDEDP if we found a builtin to expand. */
15461 static rtx
15462 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
15464 const struct builtin_description *d;
15465 size_t i;
15466 enum insn_code icode;
15467 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15468 tree arg0, arg1, arg2;
15469 rtx op0, pat;
15470 machine_mode tmode, mode0;
15471 enum rs6000_builtins fcode
15472 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15474 if (rs6000_overloaded_builtin_p (fcode))
15476 *expandedp = true;
15477 error ("unresolved overload for Altivec builtin %qF", fndecl);
15479 /* Given it is invalid, just generate a normal call. */
15480 return expand_call (exp, target, false);
15483 target = altivec_expand_ld_builtin (exp, target, expandedp);
15484 if (*expandedp)
15485 return target;
15487 target = altivec_expand_st_builtin (exp, target, expandedp);
15488 if (*expandedp)
15489 return target;
15491 target = altivec_expand_dst_builtin (exp, target, expandedp);
15492 if (*expandedp)
15493 return target;
15495 *expandedp = true;
15497 switch (fcode)
15499 case ALTIVEC_BUILTIN_STVX_V2DF:
15500 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
15501 case ALTIVEC_BUILTIN_STVX_V2DI:
15502 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
15503 case ALTIVEC_BUILTIN_STVX_V4SF:
15504 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
15505 case ALTIVEC_BUILTIN_STVX:
15506 case ALTIVEC_BUILTIN_STVX_V4SI:
15507 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
15508 case ALTIVEC_BUILTIN_STVX_V8HI:
15509 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
15510 case ALTIVEC_BUILTIN_STVX_V16QI:
15511 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
15512 case ALTIVEC_BUILTIN_STVEBX:
15513 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
15514 case ALTIVEC_BUILTIN_STVEHX:
15515 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
15516 case ALTIVEC_BUILTIN_STVEWX:
15517 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
15518 case ALTIVEC_BUILTIN_STVXL_V2DF:
15519 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
15520 case ALTIVEC_BUILTIN_STVXL_V2DI:
15521 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
15522 case ALTIVEC_BUILTIN_STVXL_V4SF:
15523 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
15524 case ALTIVEC_BUILTIN_STVXL:
15525 case ALTIVEC_BUILTIN_STVXL_V4SI:
15526 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
15527 case ALTIVEC_BUILTIN_STVXL_V8HI:
15528 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
15529 case ALTIVEC_BUILTIN_STVXL_V16QI:
15530 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
15532 case ALTIVEC_BUILTIN_STVLX:
15533 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
15534 case ALTIVEC_BUILTIN_STVLXL:
15535 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
15536 case ALTIVEC_BUILTIN_STVRX:
15537 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
15538 case ALTIVEC_BUILTIN_STVRXL:
15539 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
15541 case P9V_BUILTIN_STXVL:
15542 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
15544 case P9V_BUILTIN_XST_LEN_R:
15545 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
15547 case VSX_BUILTIN_STXVD2X_V1TI:
15548 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
15549 case VSX_BUILTIN_STXVD2X_V2DF:
15550 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
15551 case VSX_BUILTIN_STXVD2X_V2DI:
15552 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
15553 case VSX_BUILTIN_STXVW4X_V4SF:
15554 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
15555 case VSX_BUILTIN_STXVW4X_V4SI:
15556 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
15557 case VSX_BUILTIN_STXVW4X_V8HI:
15558 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
15559 case VSX_BUILTIN_STXVW4X_V16QI:
15560 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
15562 /* For the following on big endian, it's ok to use any appropriate
15563 unaligned-supporting store, so use a generic expander. For
15564 little-endian, the exact element-reversing instruction must
15565 be used. */
15566 case VSX_BUILTIN_ST_ELEMREV_V2DF:
15568 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
15569 : CODE_FOR_vsx_st_elemrev_v2df);
15570 return altivec_expand_stv_builtin (code, exp);
15572 case VSX_BUILTIN_ST_ELEMREV_V2DI:
15574 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
15575 : CODE_FOR_vsx_st_elemrev_v2di);
15576 return altivec_expand_stv_builtin (code, exp);
15578 case VSX_BUILTIN_ST_ELEMREV_V4SF:
15580 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
15581 : CODE_FOR_vsx_st_elemrev_v4sf);
15582 return altivec_expand_stv_builtin (code, exp);
15584 case VSX_BUILTIN_ST_ELEMREV_V4SI:
15586 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
15587 : CODE_FOR_vsx_st_elemrev_v4si);
15588 return altivec_expand_stv_builtin (code, exp);
15590 case VSX_BUILTIN_ST_ELEMREV_V8HI:
15592 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
15593 : CODE_FOR_vsx_st_elemrev_v8hi);
15594 return altivec_expand_stv_builtin (code, exp);
15596 case VSX_BUILTIN_ST_ELEMREV_V16QI:
15598 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
15599 : CODE_FOR_vsx_st_elemrev_v16qi);
15600 return altivec_expand_stv_builtin (code, exp);
15603 case ALTIVEC_BUILTIN_MFVSCR:
15604 icode = CODE_FOR_altivec_mfvscr;
15605 tmode = insn_data[icode].operand[0].mode;
15607 if (target == 0
15608 || GET_MODE (target) != tmode
15609 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15610 target = gen_reg_rtx (tmode);
15612 pat = GEN_FCN (icode) (target);
15613 if (! pat)
15614 return 0;
15615 emit_insn (pat);
15616 return target;
15618 case ALTIVEC_BUILTIN_MTVSCR:
15619 icode = CODE_FOR_altivec_mtvscr;
15620 arg0 = CALL_EXPR_ARG (exp, 0);
15621 op0 = expand_normal (arg0);
15622 mode0 = insn_data[icode].operand[0].mode;
15624 /* If we got invalid arguments, bail out before generating bad rtl. */
15625 if (arg0 == error_mark_node)
15626 return const0_rtx;
15628 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15629 op0 = copy_to_mode_reg (mode0, op0);
15631 pat = GEN_FCN (icode) (op0);
15632 if (pat)
15633 emit_insn (pat);
15634 return NULL_RTX;
15636 case ALTIVEC_BUILTIN_DSSALL:
15637 emit_insn (gen_altivec_dssall ());
15638 return NULL_RTX;
15640 case ALTIVEC_BUILTIN_DSS:
15641 icode = CODE_FOR_altivec_dss;
15642 arg0 = CALL_EXPR_ARG (exp, 0);
15643 STRIP_NOPS (arg0);
15644 op0 = expand_normal (arg0);
15645 mode0 = insn_data[icode].operand[0].mode;
15647 /* If we got invalid arguments, bail out before generating bad rtl. */
15648 if (arg0 == error_mark_node)
15649 return const0_rtx;
15651 if (TREE_CODE (arg0) != INTEGER_CST
15652 || TREE_INT_CST_LOW (arg0) & ~0x3)
15654 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15655 return const0_rtx;
15658 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15659 op0 = copy_to_mode_reg (mode0, op0);
15661 emit_insn (gen_altivec_dss (op0));
15662 return NULL_RTX;
15664 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
15665 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
15666 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
15667 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
15668 case VSX_BUILTIN_VEC_INIT_V2DF:
15669 case VSX_BUILTIN_VEC_INIT_V2DI:
15670 case VSX_BUILTIN_VEC_INIT_V1TI:
15671 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
15673 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15674 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15675 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15676 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15677 case VSX_BUILTIN_VEC_SET_V2DF:
15678 case VSX_BUILTIN_VEC_SET_V2DI:
15679 case VSX_BUILTIN_VEC_SET_V1TI:
15680 return altivec_expand_vec_set_builtin (exp);
15682 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15683 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15684 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15685 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15686 case VSX_BUILTIN_VEC_EXT_V2DF:
15687 case VSX_BUILTIN_VEC_EXT_V2DI:
15688 case VSX_BUILTIN_VEC_EXT_V1TI:
15689 return altivec_expand_vec_ext_builtin (exp, target);
15691 case P9V_BUILTIN_VEXTRACT4B:
15692 case P9V_BUILTIN_VEC_VEXTRACT4B:
15693 arg1 = CALL_EXPR_ARG (exp, 1);
15694 STRIP_NOPS (arg1);
15696 /* Generate a normal call if it is invalid. */
15697 if (arg1 == error_mark_node)
15698 return expand_call (exp, target, false);
15700 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15702 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15703 return expand_call (exp, target, false);
15705 break;
15707 case P9V_BUILTIN_VINSERT4B:
15708 case P9V_BUILTIN_VINSERT4B_DI:
15709 case P9V_BUILTIN_VEC_VINSERT4B:
15710 arg2 = CALL_EXPR_ARG (exp, 2);
15711 STRIP_NOPS (arg2);
15713 /* Generate a normal call if it is invalid. */
15714 if (arg2 == error_mark_node)
15715 return expand_call (exp, target, false);
15717 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15719 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15720 return expand_call (exp, target, false);
15722 break;
15724 default:
15725 break;
15726 /* Fall through to the table-driven expanders below. */
15729 /* Expand abs* operations. */
15730 d = bdesc_abs;
15731 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15732 if (d->code == fcode)
15733 return altivec_expand_abs_builtin (d->icode, exp, target);
15735 /* Expand the AltiVec predicates. */
15736 d = bdesc_altivec_preds;
15737 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15738 if (d->code == fcode)
15739 return altivec_expand_predicate_builtin (d->icode, exp, target);
15741 /* The LV* builtins are initialized differently from the table-driven
builtins, so expand them individually here. */
15742 switch (fcode)
15744 case ALTIVEC_BUILTIN_LVSL:
15745 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15746 exp, target, false);
15747 case ALTIVEC_BUILTIN_LVSR:
15748 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15749 exp, target, false);
15750 case ALTIVEC_BUILTIN_LVEBX:
15751 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15752 exp, target, false);
15753 case ALTIVEC_BUILTIN_LVEHX:
15754 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15755 exp, target, false);
15756 case ALTIVEC_BUILTIN_LVEWX:
15757 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15758 exp, target, false);
15759 case ALTIVEC_BUILTIN_LVXL_V2DF:
15760 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15761 exp, target, false);
15762 case ALTIVEC_BUILTIN_LVXL_V2DI:
15763 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15764 exp, target, false);
15765 case ALTIVEC_BUILTIN_LVXL_V4SF:
15766 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15767 exp, target, false);
15768 case ALTIVEC_BUILTIN_LVXL:
15769 case ALTIVEC_BUILTIN_LVXL_V4SI:
15770 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15771 exp, target, false);
15772 case ALTIVEC_BUILTIN_LVXL_V8HI:
15773 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15774 exp, target, false);
15775 case ALTIVEC_BUILTIN_LVXL_V16QI:
15776 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15777 exp, target, false);
15778 case ALTIVEC_BUILTIN_LVX_V2DF:
15779 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
15780 exp, target, false);
15781 case ALTIVEC_BUILTIN_LVX_V2DI:
15782 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
15783 exp, target, false);
15784 case ALTIVEC_BUILTIN_LVX_V4SF:
15785 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
15786 exp, target, false);
15787 case ALTIVEC_BUILTIN_LVX:
15788 case ALTIVEC_BUILTIN_LVX_V4SI:
15789 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
15790 exp, target, false);
15791 case ALTIVEC_BUILTIN_LVX_V8HI:
15792 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
15793 exp, target, false);
15794 case ALTIVEC_BUILTIN_LVX_V16QI:
15795 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
15796 exp, target, false);
15797 case ALTIVEC_BUILTIN_LVLX:
15798 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15799 exp, target, true);
15800 case ALTIVEC_BUILTIN_LVLXL:
15801 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15802 exp, target, true);
15803 case ALTIVEC_BUILTIN_LVRX:
15804 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15805 exp, target, true);
15806 case ALTIVEC_BUILTIN_LVRXL:
15807 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15808 exp, target, true);
15809 case VSX_BUILTIN_LXVD2X_V1TI:
15810 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15811 exp, target, false);
15812 case VSX_BUILTIN_LXVD2X_V2DF:
15813 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15814 exp, target, false);
15815 case VSX_BUILTIN_LXVD2X_V2DI:
15816 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15817 exp, target, false);
15818 case VSX_BUILTIN_LXVW4X_V4SF:
15819 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15820 exp, target, false);
15821 case VSX_BUILTIN_LXVW4X_V4SI:
15822 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15823 exp, target, false);
15824 case VSX_BUILTIN_LXVW4X_V8HI:
15825 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15826 exp, target, false);
15827 case VSX_BUILTIN_LXVW4X_V16QI:
15828 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15829 exp, target, false);
15830 /* For the following on big endian, it's ok to use any appropriate
15831 unaligned-supporting load, so use a generic expander. For
15832 little-endian, the exact element-reversing instruction must
15833 be used. */
15834 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15836 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15837 : CODE_FOR_vsx_ld_elemrev_v2df);
15838 return altivec_expand_lv_builtin (code, exp, target, false);
15840 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15842 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15843 : CODE_FOR_vsx_ld_elemrev_v2di);
15844 return altivec_expand_lv_builtin (code, exp, target, false);
15846 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15848 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15849 : CODE_FOR_vsx_ld_elemrev_v4sf);
15850 return altivec_expand_lv_builtin (code, exp, target, false);
15852 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15854 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15855 : CODE_FOR_vsx_ld_elemrev_v4si);
15856 return altivec_expand_lv_builtin (code, exp, target, false);
15858 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15860 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15861 : CODE_FOR_vsx_ld_elemrev_v8hi);
15862 return altivec_expand_lv_builtin (code, exp, target, false);
15864 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15866 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15867 : CODE_FOR_vsx_ld_elemrev_v16qi);
15868 return altivec_expand_lv_builtin (code, exp, target, false);
15870 break;
15871 default:
15872 break;
15873 /* Fall through to the code after the switch. */
15876 *expandedp = false;
15877 return NULL_RTX;
15880 /* Expand the builtin in EXP and store the result in TARGET. Store
15881 true in *EXPANDEDP if we found a builtin to expand. */
15882 static rtx
15883 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
15885 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15886 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15887 const struct builtin_description *d;
15888 size_t i;
15890 *expandedp = true;
15892 switch (fcode)
15894 case PAIRED_BUILTIN_STX:
15895 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
15896 case PAIRED_BUILTIN_LX:
15897 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
15898 default:
15899 break;
15900 /* Fall through to the predicate expanders below. */
15903 /* Expand the paired predicates. */
15904 d = bdesc_paired_preds;
15905 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
15906 if (d->code == fcode)
15907 return paired_expand_predicate_builtin (d->icode, exp, target);
15909 *expandedp = false;
15910 return NULL_RTX;
15913 static rtx
15914 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15916 rtx pat, scratch, tmp;
15917 tree form = CALL_EXPR_ARG (exp, 0);
15918 tree arg0 = CALL_EXPR_ARG (exp, 1);
15919 tree arg1 = CALL_EXPR_ARG (exp, 2);
15920 rtx op0 = expand_normal (arg0);
15921 rtx op1 = expand_normal (arg1);
15922 machine_mode mode0 = insn_data[icode].operand[1].mode;
15923 machine_mode mode1 = insn_data[icode].operand[2].mode;
15924 int form_int;
15925 enum rtx_code code;
15927 if (TREE_CODE (form) != INTEGER_CST)
15929 error ("argument 1 of %s must be a constant",
15930 "__builtin_paired_predicate");
15931 return const0_rtx;
15933 else
15934 form_int = TREE_INT_CST_LOW (form);
15936 gcc_assert (mode0 == mode1);
15938 if (arg0 == error_mark_node || arg1 == error_mark_node)
15939 return const0_rtx;
15941 if (target == 0
15942 || GET_MODE (target) != SImode
15943 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
15944 target = gen_reg_rtx (SImode);
15945 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
15946 op0 = copy_to_mode_reg (mode0, op0);
15947 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
15948 op1 = copy_to_mode_reg (mode1, op1);
15950 scratch = gen_reg_rtx (CCFPmode);
15952 pat = GEN_FCN (icode) (scratch, op0, op1);
15953 if (!pat)
15954 return const0_rtx;
15956 emit_insn (pat);
15958 switch (form_int)
15960 /* LT bit. */
15961 case 0:
15962 code = LT;
15963 break;
15964 /* GT bit. */
15965 case 1:
15966 code = GT;
15967 break;
15968 /* EQ bit. */
15969 case 2:
15970 code = EQ;
15971 break;
15972 /* UN bit. */
15973 case 3:
15974 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
15975 return target;
15976 default:
15977 error ("argument 1 of %qs is out of range",
15978 "__builtin_paired_predicate");
15979 return const0_rtx;
15982 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
15983 emit_move_insn (target, tmp);
15984 return target;
15987 /* Raise an error message for a builtin function that is called without the
15988 appropriate target options being set. */
15990 static void
15991 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15993 size_t uns_fncode = (size_t) fncode;
15994 const char *name = rs6000_builtin_info[uns_fncode].name;
15995 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15997 gcc_assert (name != NULL);
15998 if ((fnmask & RS6000_BTM_CELL) != 0)
15999 error ("builtin function %qs is only valid for the cell processor", name);
16000 else if ((fnmask & RS6000_BTM_VSX) != 0)
16001 error ("builtin function %qs requires the %qs option", name, "-mvsx");
16002 else if ((fnmask & RS6000_BTM_HTM) != 0)
16003 error ("builtin function %qs requires the %qs option", name, "-mhtm");
16004 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
16005 error ("builtin function %qs requires the %qs option", name, "-maltivec");
16006 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
16007 error ("builtin function %qs requires the %qs option", name, "-mpaired");
16008 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16009 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16010 error ("builtin function %qs requires the %qs and %qs options",
16011 name, "-mhard-dfp", "-mpower8-vector");
16012 else if ((fnmask & RS6000_BTM_DFP) != 0)
16013 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
16014 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
16015 error ("builtin function %qs requires the %qs option", name,
16016 "-mpower8-vector");
16017 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16018 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16019 error ("builtin function %qs requires the %qs and %qs options",
16020 name, "-mcpu=power9", "-m64");
16021 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
16022 error ("builtin function %qs requires the %qs option", name,
16023 "-mcpu=power9");
16024 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16025 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16026 error ("builtin function %qs requires the %qs and %qs options",
16027 name, "-mcpu=power9", "-m64");
16028 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
16029 error ("builtin function %qs requires the %qs option", name,
16030 "-mcpu=power9");
16031 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16032 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16033 error ("builtin function %qs requires the %qs and %qs options",
16034 name, "-mhard-float", "-mlong-double-128");
16035 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
16036 error ("builtin function %qs requires the %qs option", name,
16037 "-mhard-float");
16038 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
16039 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
16040 name);
16041 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
16042 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
16043 else
16044 error ("builtin function %qs is not supported with the current options",
16045 name);
16048 /* Target hook for early folding of built-ins, adapted
16049 from ia64.c. */
16051 static tree
16052 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
16053 int n_args ATTRIBUTE_UNUSED,
16054 tree *args ATTRIBUTE_UNUSED,
16055 bool ignore ATTRIBUTE_UNUSED)
16057 #ifdef SUBTARGET_FOLD_BUILTIN
16058 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
16059 #else
16060 return NULL_TREE;
16061 #endif
16064 /* Helper function to sort out which built-ins may be valid without having
16065 a LHS. */
16066 static bool
16067 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
16069 switch (fn_code)
16071 case ALTIVEC_BUILTIN_STVX_V16QI:
16072 case ALTIVEC_BUILTIN_STVX_V8HI:
16073 case ALTIVEC_BUILTIN_STVX_V4SI:
16074 case ALTIVEC_BUILTIN_STVX_V4SF:
16075 case ALTIVEC_BUILTIN_STVX_V2DI:
16076 case ALTIVEC_BUILTIN_STVX_V2DF:
16077 return true;
16078 default:
16079 return false;
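/* For example, a plain store written as

     vec_st (v, 0, ptr);

   resolves via the overload machinery to one of the ALTIVEC_BUILTIN_STVX_*
   codes listed above and has no LHS, yet should still be considered for
   gimple folding.  */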
16083 /* Helper function to handle the gimple folding of a vector compare
16084 operation. This sets up true/false vectors, and uses the
16085 VEC_COND_EXPR operation.
16086 CODE indicates which comparison is to be made. (EQ, GT, ...).
16087 TYPE indicates the type of the result. */
16088 static tree
16089 fold_build_vec_cmp (tree_code code, tree type,
16090 tree arg0, tree arg1)
16092 tree cmp_type = build_same_sized_truth_vector_type (type);
16093 tree zero_vec = build_zero_cst (type);
16094 tree minus_one_vec = build_minus_one_cst (type);
16095 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
16096 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
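/* For instance, folding a comparison on vector int produces, in GIMPLE
   terms:

     cmp = arg0 <op> arg1;                    with a vector boolean type
     lhs = cmp ? { -1, -1, -1, -1 } : { 0, 0, 0, 0 };

   i.e. a VEC_COND_EXPR selecting between the all-ones and all-zeros
   vectors, matching the AltiVec convention that a true lane is all one
   bits.  */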
16099 /* Helper function to handle the in-between steps for the
16100 vector compare built-ins. */
16101 static void
16102 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
16104 tree arg0 = gimple_call_arg (stmt, 0);
16105 tree arg1 = gimple_call_arg (stmt, 1);
16106 tree lhs = gimple_call_lhs (stmt);
16107 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
16108 gimple *g = gimple_build_assign (lhs, cmp);
16109 gimple_set_location (g, gimple_location (stmt));
16110 gsi_replace (gsi, g, true);
16113 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
16114 a constant, use rs6000_fold_builtin.) */
16116 bool
16117 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
16119 gimple *stmt = gsi_stmt (*gsi);
16120 tree fndecl = gimple_call_fndecl (stmt);
16121 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
16122 enum rs6000_builtins fn_code
16123 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16124 tree arg0, arg1, lhs;
16126 size_t uns_fncode = (size_t) fn_code;
16127 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
16128 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
16129 const char *fn_name2 = (icode != CODE_FOR_nothing)
16130 ? get_insn_name ((int) icode)
16131 : "nothing";
16133 if (TARGET_DEBUG_BUILTIN)
16134 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
16135 fn_code, fn_name1, fn_name2);
16137 if (!rs6000_fold_gimple)
16138 return false;
16140 /* Prevent gimple folding for code that does not have a LHS, unless it is
16141 allowed per the rs6000_builtin_valid_without_lhs helper function. */
16142 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
16143 return false;
16145 switch (fn_code)
16147 /* Flavors of vec_add. We deliberately don't expand
16148 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
16149 TImode, resulting in much poorer code generation. */
16150 case ALTIVEC_BUILTIN_VADDUBM:
16151 case ALTIVEC_BUILTIN_VADDUHM:
16152 case ALTIVEC_BUILTIN_VADDUWM:
16153 case P8V_BUILTIN_VADDUDM:
16154 case ALTIVEC_BUILTIN_VADDFP:
16155 case VSX_BUILTIN_XVADDDP:
16157 arg0 = gimple_call_arg (stmt, 0);
16158 arg1 = gimple_call_arg (stmt, 1);
16159 lhs = gimple_call_lhs (stmt);
16160 gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
16161 gimple_set_location (g, gimple_location (stmt));
16162 gsi_replace (gsi, g, true);
16163 return true;
16165 /* Flavors of vec_sub. We deliberately don't expand
16166 P8V_BUILTIN_VSUBUQM. */
16167 case ALTIVEC_BUILTIN_VSUBUBM:
16168 case ALTIVEC_BUILTIN_VSUBUHM:
16169 case ALTIVEC_BUILTIN_VSUBUWM:
16170 case P8V_BUILTIN_VSUBUDM:
16171 case ALTIVEC_BUILTIN_VSUBFP:
16172 case VSX_BUILTIN_XVSUBDP:
16174 arg0 = gimple_call_arg (stmt, 0);
16175 arg1 = gimple_call_arg (stmt, 1);
16176 lhs = gimple_call_lhs (stmt);
16177 gimple *g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
16178 gimple_set_location (g, gimple_location (stmt));
16179 gsi_replace (gsi, g, true);
16180 return true;
16182 case VSX_BUILTIN_XVMULSP:
16183 case VSX_BUILTIN_XVMULDP:
16185 arg0 = gimple_call_arg (stmt, 0);
16186 arg1 = gimple_call_arg (stmt, 1);
16187 lhs = gimple_call_lhs (stmt);
16188 gimple *g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
16189 gimple_set_location (g, gimple_location (stmt));
16190 gsi_replace (gsi, g, true);
16191 return true;
16193 /* Even element flavors of vec_mul (signed). */
16194 case ALTIVEC_BUILTIN_VMULESB:
16195 case ALTIVEC_BUILTIN_VMULESH:
16196 case ALTIVEC_BUILTIN_VMULESW:
16197 /* Even element flavors of vec_mul (unsigned). */
16198 case ALTIVEC_BUILTIN_VMULEUB:
16199 case ALTIVEC_BUILTIN_VMULEUH:
16200 case ALTIVEC_BUILTIN_VMULEUW:
16202 arg0 = gimple_call_arg (stmt, 0);
16203 arg1 = gimple_call_arg (stmt, 1);
16204 lhs = gimple_call_lhs (stmt);
16205 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
16206 gimple_set_location (g, gimple_location (stmt));
16207 gsi_replace (gsi, g, true);
16208 return true;
16210 /* Odd element flavors of vec_mul (signed). */
16211 case ALTIVEC_BUILTIN_VMULOSB:
16212 case ALTIVEC_BUILTIN_VMULOSH:
16213 case ALTIVEC_BUILTIN_VMULOSW:
16214 /* Odd element flavors of vec_mul (unsigned). */
16215 case ALTIVEC_BUILTIN_VMULOUB:
16216 case ALTIVEC_BUILTIN_VMULOUH:
16217 case ALTIVEC_BUILTIN_VMULOUW:
16219 arg0 = gimple_call_arg (stmt, 0);
16220 arg1 = gimple_call_arg (stmt, 1);
16221 lhs = gimple_call_lhs (stmt);
16222 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
16223 gimple_set_location (g, gimple_location (stmt));
16224 gsi_replace (gsi, g, true);
16225 return true;
16227 /* Flavors of vec_div (Integer). */
16228 case VSX_BUILTIN_DIV_V2DI:
16229 case VSX_BUILTIN_UDIV_V2DI:
16231 arg0 = gimple_call_arg (stmt, 0);
16232 arg1 = gimple_call_arg (stmt, 1);
16233 lhs = gimple_call_lhs (stmt);
16234 gimple *g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
16235 gimple_set_location (g, gimple_location (stmt));
16236 gsi_replace (gsi, g, true);
16237 return true;
16239 /* Flavors of vec_div (Float). */
16240 case VSX_BUILTIN_XVDIVSP:
16241 case VSX_BUILTIN_XVDIVDP:
16243 arg0 = gimple_call_arg (stmt, 0);
16244 arg1 = gimple_call_arg (stmt, 1);
16245 lhs = gimple_call_lhs (stmt);
16246 gimple *g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
16247 gimple_set_location (g, gimple_location (stmt));
16248 gsi_replace (gsi, g, true);
16249 return true;
16251 /* Flavors of vec_and. */
16252 case ALTIVEC_BUILTIN_VAND:
16254 arg0 = gimple_call_arg (stmt, 0);
16255 arg1 = gimple_call_arg (stmt, 1);
16256 lhs = gimple_call_lhs (stmt);
16257 gimple *g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
16258 gimple_set_location (g, gimple_location (stmt));
16259 gsi_replace (gsi, g, true);
16260 return true;
16262 /* Flavors of vec_andc. */
16263 case ALTIVEC_BUILTIN_VANDC:
16265 arg0 = gimple_call_arg (stmt, 0);
16266 arg1 = gimple_call_arg (stmt, 1);
16267 lhs = gimple_call_lhs (stmt);
16268 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16269 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16270 gimple_set_location (g, gimple_location (stmt));
16271 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16272 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
16273 gimple_set_location (g, gimple_location (stmt));
16274 gsi_replace (gsi, g, true);
16275 return true;
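/* A sketch of the GIMPLE emitted by the vec_andc case above, with an
   illustrative name _t for the temporary:

     _t = ~arg1;
     lhs = arg0 & _t;

   i.e. one inserted statement plus the replacement of the original call.  */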
16277 /* Flavors of vec_nand. */
16278 case P8V_BUILTIN_VEC_NAND:
16279 case P8V_BUILTIN_NAND_V16QI:
16280 case P8V_BUILTIN_NAND_V8HI:
16281 case P8V_BUILTIN_NAND_V4SI:
16282 case P8V_BUILTIN_NAND_V4SF:
16283 case P8V_BUILTIN_NAND_V2DF:
16284 case P8V_BUILTIN_NAND_V2DI:
16286 arg0 = gimple_call_arg (stmt, 0);
16287 arg1 = gimple_call_arg (stmt, 1);
16288 lhs = gimple_call_lhs (stmt);
16289 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16290 gimple *g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
16291 gimple_set_location (g, gimple_location (stmt));
16292 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16293 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16294 gimple_set_location (g, gimple_location (stmt));
16295 gsi_replace (gsi, g, true);
16296 return true;
16298 /* Flavors of vec_or. */
16299 case ALTIVEC_BUILTIN_VOR:
16301 arg0 = gimple_call_arg (stmt, 0);
16302 arg1 = gimple_call_arg (stmt, 1);
16303 lhs = gimple_call_lhs (stmt);
16304 gimple *g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
16305 gimple_set_location (g, gimple_location (stmt));
16306 gsi_replace (gsi, g, true);
16307 return true;
16309 /* Flavors of vec_orc. */
16310 case P8V_BUILTIN_ORC_V16QI:
16311 case P8V_BUILTIN_ORC_V8HI:
16312 case P8V_BUILTIN_ORC_V4SI:
16313 case P8V_BUILTIN_ORC_V4SF:
16314 case P8V_BUILTIN_ORC_V2DF:
16315 case P8V_BUILTIN_ORC_V2DI:
16317 arg0 = gimple_call_arg (stmt, 0);
16318 arg1 = gimple_call_arg (stmt, 1);
16319 lhs = gimple_call_lhs (stmt);
16320 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16321 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16322 gimple_set_location (g, gimple_location (stmt));
16323 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16324 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
16325 gimple_set_location (g, gimple_location (stmt));
16326 gsi_replace (gsi, g, true);
16327 return true;
16329 /* Flavors of vec_xor. */
16330 case ALTIVEC_BUILTIN_VXOR:
16332 arg0 = gimple_call_arg (stmt, 0);
16333 arg1 = gimple_call_arg (stmt, 1);
16334 lhs = gimple_call_lhs (stmt);
16335 gimple *g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
16336 gimple_set_location (g, gimple_location (stmt));
16337 gsi_replace (gsi, g, true);
16338 return true;
16340 /* Flavors of vec_nor. */
16341 case ALTIVEC_BUILTIN_VNOR:
16343 arg0 = gimple_call_arg (stmt, 0);
16344 arg1 = gimple_call_arg (stmt, 1);
16345 lhs = gimple_call_lhs (stmt);
16346 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16347 gimple *g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
16348 gimple_set_location (g, gimple_location (stmt));
16349 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16350 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16351 gimple_set_location (g, gimple_location (stmt));
16352 gsi_replace (gsi, g, true);
16353 return true;
16355 /* Flavors of vec_abs. */
16356 case ALTIVEC_BUILTIN_ABS_V16QI:
16357 case ALTIVEC_BUILTIN_ABS_V8HI:
16358 case ALTIVEC_BUILTIN_ABS_V4SI:
16359 case ALTIVEC_BUILTIN_ABS_V4SF:
16360 case P8V_BUILTIN_ABS_V2DI:
16361 case VSX_BUILTIN_XVABSDP:
16363 arg0 = gimple_call_arg (stmt, 0);
16364 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16365 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16366 return false;
16367 lhs = gimple_call_lhs (stmt);
16368 gimple *g = gimple_build_assign (lhs, ABS_EXPR, arg0);
16369 gimple_set_location (g, gimple_location (stmt));
16370 gsi_replace (gsi, g, true);
16371 return true;
16373 /* Flavors of vec_min. */
16374 case VSX_BUILTIN_XVMINDP:
16375 case P8V_BUILTIN_VMINSD:
16376 case P8V_BUILTIN_VMINUD:
16377 case ALTIVEC_BUILTIN_VMINSB:
16378 case ALTIVEC_BUILTIN_VMINSH:
16379 case ALTIVEC_BUILTIN_VMINSW:
16380 case ALTIVEC_BUILTIN_VMINUB:
16381 case ALTIVEC_BUILTIN_VMINUH:
16382 case ALTIVEC_BUILTIN_VMINUW:
16383 case ALTIVEC_BUILTIN_VMINFP:
16385 arg0 = gimple_call_arg (stmt, 0);
16386 arg1 = gimple_call_arg (stmt, 1);
16387 lhs = gimple_call_lhs (stmt);
16388 gimple *g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
16389 gimple_set_location (g, gimple_location (stmt));
16390 gsi_replace (gsi, g, true);
16391 return true;
16393 /* Flavors of vec_max. */
16394 case VSX_BUILTIN_XVMAXDP:
16395 case P8V_BUILTIN_VMAXSD:
16396 case P8V_BUILTIN_VMAXUD:
16397 case ALTIVEC_BUILTIN_VMAXSB:
16398 case ALTIVEC_BUILTIN_VMAXSH:
16399 case ALTIVEC_BUILTIN_VMAXSW:
16400 case ALTIVEC_BUILTIN_VMAXUB:
16401 case ALTIVEC_BUILTIN_VMAXUH:
16402 case ALTIVEC_BUILTIN_VMAXUW:
16403 case ALTIVEC_BUILTIN_VMAXFP:
16405 arg0 = gimple_call_arg (stmt, 0);
16406 arg1 = gimple_call_arg (stmt, 1);
16407 lhs = gimple_call_lhs (stmt);
16408 gimple *g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
16409 gimple_set_location (g, gimple_location (stmt));
16410 gsi_replace (gsi, g, true);
16411 return true;
16413 /* Flavors of vec_eqv. */
16414 case P8V_BUILTIN_EQV_V16QI:
16415 case P8V_BUILTIN_EQV_V8HI:
16416 case P8V_BUILTIN_EQV_V4SI:
16417 case P8V_BUILTIN_EQV_V4SF:
16418 case P8V_BUILTIN_EQV_V2DF:
16419 case P8V_BUILTIN_EQV_V2DI:
16421 arg0 = gimple_call_arg (stmt, 0);
16422 arg1 = gimple_call_arg (stmt, 1);
16423 lhs = gimple_call_lhs (stmt);
16424 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16425 gimple *g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
16426 gimple_set_location (g, gimple_location (stmt));
16427 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16428 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16429 gimple_set_location (g, gimple_location (stmt));
16430 gsi_replace (gsi, g, true);
16431 return true;
16433 /* Flavors of vec_rotate_left. */
16434 case ALTIVEC_BUILTIN_VRLB:
16435 case ALTIVEC_BUILTIN_VRLH:
16436 case ALTIVEC_BUILTIN_VRLW:
16437 case P8V_BUILTIN_VRLD:
16439 arg0 = gimple_call_arg (stmt, 0);
16440 arg1 = gimple_call_arg (stmt, 1);
16441 lhs = gimple_call_lhs (stmt);
16442 gimple *g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
16443 gimple_set_location (g, gimple_location (stmt));
16444 gsi_replace (gsi, g, true);
16445 return true;
16447 /* Flavors of vector shift right algebraic.
16448 vec_sra{b,h,w} -> vsra{b,h,w}. */
16449 case ALTIVEC_BUILTIN_VSRAB:
16450 case ALTIVEC_BUILTIN_VSRAH:
16451 case ALTIVEC_BUILTIN_VSRAW:
16452 case P8V_BUILTIN_VSRAD:
16454 arg0 = gimple_call_arg (stmt, 0);
16455 arg1 = gimple_call_arg (stmt, 1);
16456 lhs = gimple_call_lhs (stmt);
16457 gimple *g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
16458 gimple_set_location (g, gimple_location (stmt));
16459 gsi_replace (gsi, g, true);
16460 return true;
16462 /* Flavors of vector shift left.
16463 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
16464 case ALTIVEC_BUILTIN_VSLB:
16465 case ALTIVEC_BUILTIN_VSLH:
16466 case ALTIVEC_BUILTIN_VSLW:
16467 case P8V_BUILTIN_VSLD:
16469 arg0 = gimple_call_arg (stmt, 0);
16470 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16471 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16472 return false;
16473 arg1 = gimple_call_arg (stmt, 1);
16474 lhs = gimple_call_lhs (stmt);
16475 gimple *g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
16476 gimple_set_location (g, gimple_location (stmt));
16477 gsi_replace (gsi, g, true);
16478 return true;
16480 /* Flavors of vector shift right. */
16481 case ALTIVEC_BUILTIN_VSRB:
16482 case ALTIVEC_BUILTIN_VSRH:
16483 case ALTIVEC_BUILTIN_VSRW:
16484 case P8V_BUILTIN_VSRD:
16486 arg0 = gimple_call_arg (stmt, 0);
16487 arg1 = gimple_call_arg (stmt, 1);
16488 lhs = gimple_call_lhs (stmt);
16489 gimple_seq stmts = NULL;
16490 /* Convert arg0 to unsigned. */
16491 tree arg0_unsigned
16492 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
16493 unsigned_type_for (TREE_TYPE (arg0)), arg0);
16494 tree res
16495 = gimple_build (&stmts, RSHIFT_EXPR,
16496 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
16497 /* Convert result back to the lhs type. */
16498 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
16499 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16500 update_call_from_tree (gsi, res);
16501 return true;
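/* The detour through an unsigned type above is deliberate: RSHIFT_EXPR on
   a signed operand would be an arithmetic (sign-extending) shift, while
   these built-ins are logical shifts.  Sketch of the resulting GIMPLE,
   with illustrative SSA names:

     _u = VIEW_CONVERT_EXPR<unsigned type> (arg0);
     _r = _u >> arg1;
     lhs = VIEW_CONVERT_EXPR<original type> (_r);  */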
16503 /* Vector loads. */
16504 case ALTIVEC_BUILTIN_LVX_V16QI:
16505 case ALTIVEC_BUILTIN_LVX_V8HI:
16506 case ALTIVEC_BUILTIN_LVX_V4SI:
16507 case ALTIVEC_BUILTIN_LVX_V4SF:
16508 case ALTIVEC_BUILTIN_LVX_V2DI:
16509 case ALTIVEC_BUILTIN_LVX_V2DF:
16511 arg0 = gimple_call_arg (stmt, 0); /* Offset. */
16512 arg1 = gimple_call_arg (stmt, 1); /* Load-from address. */
16513 /* Do not fold for -maltivec=be on LE targets. */
16514 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16515 return false;
16516 lhs = gimple_call_lhs (stmt);
16517 location_t loc = gimple_location (stmt);
16518 /* Since arg1 may be cast to a different type, just use ptr_type_node
16519 here instead of trying to enforce TBAA on pointer types. */
16520 tree arg1_type = ptr_type_node;
16521 tree lhs_type = TREE_TYPE (lhs);
16522 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16523 the tree using the value from arg0. The resulting type will match
16524 the type of arg1. */
16525 gimple_seq stmts = NULL;
16526 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
16527 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16528 arg1_type, arg1, temp_offset);
16529 /* Mask off any lower bits from the address. */
16530 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
16531 arg1_type, temp_addr,
16532 build_int_cst (arg1_type, -16));
16533 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16534 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
16535 take an offset, but since we've already incorporated the offset
16536 above, here we just pass in a zero. */
16537 gimple *g;
16538 g = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
16539 build_int_cst (arg1_type, 0)));
16540 gimple_set_location (g, loc);
16541 gsi_replace (gsi, g, true);
16542 return true;
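/* Sketch of the sequence built for an lvx fold, with illustrative SSA
   names:

     _1 = (sizetype) arg0;
     _2 = arg1 p+ _1;     /* POINTER_PLUS_EXPR.  */
     _3 = _2 & -16;       /* Force 16-byte alignment.  */
     lhs = MEM[(void *)_3];

   The masking mirrors the hardware behavior of lvx, which ignores the low
   four bits of the effective address.  */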
16544 /* Vector stores. */
16545 case ALTIVEC_BUILTIN_STVX_V16QI:
16546 case ALTIVEC_BUILTIN_STVX_V8HI:
16547 case ALTIVEC_BUILTIN_STVX_V4SI:
16548 case ALTIVEC_BUILTIN_STVX_V4SF:
16549 case ALTIVEC_BUILTIN_STVX_V2DI:
16550 case ALTIVEC_BUILTIN_STVX_V2DF:
16552 /* Do not fold for -maltivec=be on LE targets. */
16553 if (VECTOR_ELT_ORDER_BIG && !BYTES_BIG_ENDIAN)
16554 return false;
16555 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
16556 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
16557 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
16558 location_t loc = gimple_location (stmt);
16559 tree arg0_type = TREE_TYPE (arg0);
16560 /* Use ptr_type_node (no TBAA) for the arg2_type.
16561 FIXME: (Richard) "A proper fix would be to transition this type as
16562 seen from the frontend to GIMPLE, for example in a similar way we
16563 do for MEM_REFs by piggy-backing that on an extra argument, a
16564 constant zero pointer of the alias pointer type to use (which would
16565 also serve as a type indicator of the store itself). I'd use a
16566 target specific internal function for this (not sure if we can have
16567 those target specific, but I guess if it's folded away then that's
16568 fine) and get away with the overload set."  */
16570 tree arg2_type = ptr_type_node;
16571 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16572 the tree using the value from arg1. The resulting type will match
16573 the type of arg2. */
16574 gimple_seq stmts = NULL;
16575 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
16576 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16577 arg2_type, arg2, temp_offset);
16578 /* Mask off any lower bits from the address. */
16579 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
16580 arg2_type, temp_addr,
16581 build_int_cst (arg2_type, -16));
16582 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16583 /* The desired gimple result should be similar to:
16584 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
16585 gimple *g;
16586 g = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
16587 build_int_cst (arg2_type, 0)), arg0);
16588 gimple_set_location (g, loc);
16589 gsi_replace (gsi, g, true);
16590 return true;
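/* The store fold mirrors the load fold above; a sketch with illustrative
   SSA names:

     _1 = (sizetype) arg1;
     _2 = arg2 p+ _1;
     _3 = _2 & -16;
     MEM[(void *)_3] = arg0;  */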
16593 /* Vector Fused multiply-add (fma). */
16594 case ALTIVEC_BUILTIN_VMADDFP:
16595 case VSX_BUILTIN_XVMADDDP:
16596 case ALTIVEC_BUILTIN_VMLADDUHM:
16598 arg0 = gimple_call_arg (stmt, 0);
16599 arg1 = gimple_call_arg (stmt, 1);
16600 tree arg2 = gimple_call_arg (stmt, 2);
16601 lhs = gimple_call_lhs (stmt);
16602 gimple *g = gimple_build_assign (lhs, FMA_EXPR, arg0, arg1, arg2);
16603 gimple_set_location (g, gimple_location (stmt));
16604 gsi_replace (gsi, g, true);
16605 return true;
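/* Informally, the fold above turns the call into "lhs = arg0 * arg1 + arg2"
   expressed as a single FMA_EXPR; for the floating-point variants this
   preserves the fused (single-rounding) semantics of the underlying
   instruction.  */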
16608 /* Vector compares: EQ, NE, GE, GT, LE. */
16609 case ALTIVEC_BUILTIN_VCMPEQUB:
16610 case ALTIVEC_BUILTIN_VCMPEQUH:
16611 case ALTIVEC_BUILTIN_VCMPEQUW:
16612 case P8V_BUILTIN_VCMPEQUD:
16613 fold_compare_helper (gsi, EQ_EXPR, stmt);
16614 return true;
16616 case P9V_BUILTIN_CMPNEB:
16617 case P9V_BUILTIN_CMPNEH:
16618 case P9V_BUILTIN_CMPNEW:
16619 fold_compare_helper (gsi, NE_EXPR, stmt);
16620 return true;
16622 case VSX_BUILTIN_CMPGE_16QI:
16623 case VSX_BUILTIN_CMPGE_U16QI:
16624 case VSX_BUILTIN_CMPGE_8HI:
16625 case VSX_BUILTIN_CMPGE_U8HI:
16626 case VSX_BUILTIN_CMPGE_4SI:
16627 case VSX_BUILTIN_CMPGE_U4SI:
16628 case VSX_BUILTIN_CMPGE_2DI:
16629 case VSX_BUILTIN_CMPGE_U2DI:
16630 fold_compare_helper (gsi, GE_EXPR, stmt);
16631 return true;
16633 case ALTIVEC_BUILTIN_VCMPGTSB:
16634 case ALTIVEC_BUILTIN_VCMPGTUB:
16635 case ALTIVEC_BUILTIN_VCMPGTSH:
16636 case ALTIVEC_BUILTIN_VCMPGTUH:
16637 case ALTIVEC_BUILTIN_VCMPGTSW:
16638 case ALTIVEC_BUILTIN_VCMPGTUW:
16639 case P8V_BUILTIN_VCMPGTUD:
16640 case P8V_BUILTIN_VCMPGTSD:
16641 fold_compare_helper (gsi, GT_EXPR, stmt);
16642 return true;
16644 case VSX_BUILTIN_CMPLE_16QI:
16645 case VSX_BUILTIN_CMPLE_U16QI:
16646 case VSX_BUILTIN_CMPLE_8HI:
16647 case VSX_BUILTIN_CMPLE_U8HI:
16648 case VSX_BUILTIN_CMPLE_4SI:
16649 case VSX_BUILTIN_CMPLE_U4SI:
16650 case VSX_BUILTIN_CMPLE_2DI:
16651 case VSX_BUILTIN_CMPLE_U2DI:
16652 fold_compare_helper (gsi, LE_EXPR, stmt);
16653 return true;
16655 default:
16656 if (TARGET_DEBUG_BUILTIN)
16657 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16658 fn_code, fn_name1, fn_name2);
16659 break;
16662 return false;
16665 /* Expand an expression EXP that calls a built-in function,
16666 with result going to TARGET if that's convenient
16667 (and in mode MODE if that's convenient).
16668 SUBTARGET may be used as the target for computing one of EXP's operands.
16669 IGNORE is nonzero if the value is to be ignored. */
16671 static rtx
16672 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16673 machine_mode mode ATTRIBUTE_UNUSED,
16674 int ignore ATTRIBUTE_UNUSED)
16676 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16677 enum rs6000_builtins fcode
16678 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16679 size_t uns_fcode = (size_t)fcode;
16680 const struct builtin_description *d;
16681 size_t i;
16682 rtx ret;
16683 bool success;
16684 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16685 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16686 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16688 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16689 floating point type, depending on whether long double is the IBM extended
16690 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16691 we only define one variant of the built-in function, and switch the code
16692 when defining it, rather than defining two built-ins and using the
16693 overload table in rs6000-c.c to switch between the two. */
16694 if (FLOAT128_IEEE_P (TFmode))
16695 switch (icode)
16697 default:
16698 break;
16700 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16701 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16702 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16703 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16704 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16705 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16706 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16707 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16708 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16709 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16710 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16711 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16712 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
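/* For example, when long double is IEEE 128-bit, TFmode (not KFmode) is
   the IEEE type, so a built-in registered against a *kf named pattern
   (say, CODE_FOR_sqrtkf2_odd) is redirected to the equivalent *tf pattern
   here; the single built-in decl then works for either long-double
   configuration.  */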
16715 if (TARGET_DEBUG_BUILTIN)
16717 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16718 const char *name2 = (icode != CODE_FOR_nothing)
16719 ? get_insn_name ((int) icode)
16720 : "nothing";
16721 const char *name3;
16723 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16725 default: name3 = "unknown"; break;
16726 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16727 case RS6000_BTC_UNARY: name3 = "unary"; break;
16728 case RS6000_BTC_BINARY: name3 = "binary"; break;
16729 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16730 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16731 case RS6000_BTC_ABS: name3 = "abs"; break;
16732 case RS6000_BTC_DST: name3 = "dst"; break;
16736 fprintf (stderr,
16737 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16738 (name1) ? name1 : "---", fcode,
16739 (name2) ? name2 : "---", (int) icode,
16740 name3,
16741 func_valid_p ? "" : ", not valid");
16744 if (!func_valid_p)
16746 rs6000_invalid_builtin (fcode);
16748 /* Given it is invalid, just generate a normal call. */
16749 return expand_call (exp, target, ignore);
16752 switch (fcode)
16754 case RS6000_BUILTIN_RECIP:
16755 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16757 case RS6000_BUILTIN_RECIPF:
16758 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16760 case RS6000_BUILTIN_RSQRTF:
16761 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16763 case RS6000_BUILTIN_RSQRT:
16764 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16766 case POWER7_BUILTIN_BPERMD:
16767 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16768 ? CODE_FOR_bpermd_di
16769 : CODE_FOR_bpermd_si), exp, target);
16771 case RS6000_BUILTIN_GET_TB:
16772 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16773 target);
16775 case RS6000_BUILTIN_MFTB:
16776 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16777 ? CODE_FOR_rs6000_mftb_di
16778 : CODE_FOR_rs6000_mftb_si),
16779 target);
16781 case RS6000_BUILTIN_MFFS:
16782 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16784 case RS6000_BUILTIN_MTFSF:
16785 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16787 case RS6000_BUILTIN_CPU_INIT:
16788 case RS6000_BUILTIN_CPU_IS:
16789 case RS6000_BUILTIN_CPU_SUPPORTS:
16790 return cpu_expand_builtin (fcode, exp, target);
16792 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16793 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16795 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16796 : (int) CODE_FOR_altivec_lvsl_direct);
16797 machine_mode tmode = insn_data[icode2].operand[0].mode;
16798 machine_mode mode = insn_data[icode2].operand[1].mode;
16799 tree arg;
16800 rtx op, addr, pat;
16802 gcc_assert (TARGET_ALTIVEC);
16804 arg = CALL_EXPR_ARG (exp, 0);
16805 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16806 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16807 addr = memory_address (mode, op);
16808 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16809 op = addr;
16810 else
16812 /* For the load case we need to negate the address. */
16813 op = gen_reg_rtx (GET_MODE (addr));
16814 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16816 op = gen_rtx_MEM (mode, op);
16818 if (target == 0
16819 || GET_MODE (target) != tmode
16820 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16821 target = gen_reg_rtx (tmode);
16823 pat = GEN_FCN (icode2) (target, op);
16824 if (!pat)
16825 return 0;
16826 emit_insn (pat);
16828 return target;
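/* A note on the negation above: both mask variants use the same
   endian-dependent lvsl/lvsr pattern.  For mask_for_load the address is
   negated first, which adjusts the permute control vector (a function only
   of the low four address bits) for the direction the vectorizer's
   load-realignment scheme expects, while mask_for_store uses the address
   unchanged.  */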
16831 case ALTIVEC_BUILTIN_VCFUX:
16832 case ALTIVEC_BUILTIN_VCFSX:
16833 case ALTIVEC_BUILTIN_VCTUXS:
16834 case ALTIVEC_BUILTIN_VCTSXS:
16835 /* FIXME: There's got to be a nicer way to handle this case than
16836 constructing a new CALL_EXPR. */
16837 if (call_expr_nargs (exp) == 1)
16839 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16840 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16842 break;
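/* In other words, a one-argument call such as __builtin_altivec_vcfsx (v)
   is rewritten as __builtin_altivec_vcfsx (v, 0), supplying the default
   scale factor of zero before the normal expansion path runs.  */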
16844 default:
16845 break;
16848 if (TARGET_ALTIVEC)
16850 ret = altivec_expand_builtin (exp, target, &success);
16852 if (success)
16853 return ret;
16855 if (TARGET_PAIRED_FLOAT)
16857 ret = paired_expand_builtin (exp, target, &success);
16859 if (success)
16860 return ret;
16862 if (TARGET_HTM)
16864 ret = htm_expand_builtin (exp, target, &success);
16866 if (success)
16867 return ret;
16870 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16871 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16872 gcc_assert (attr == RS6000_BTC_UNARY
16873 || attr == RS6000_BTC_BINARY
16874 || attr == RS6000_BTC_TERNARY
16875 || attr == RS6000_BTC_SPECIAL);
16877 /* Handle simple unary operations. */
16878 d = bdesc_1arg;
16879 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16880 if (d->code == fcode)
16881 return rs6000_expand_unop_builtin (icode, exp, target);
16883 /* Handle simple binary operations. */
16884 d = bdesc_2arg;
16885 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16886 if (d->code == fcode)
16887 return rs6000_expand_binop_builtin (icode, exp, target);
16889 /* Handle simple ternary operations. */
16890 d = bdesc_3arg;
16891 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16892 if (d->code == fcode)
16893 return rs6000_expand_ternop_builtin (icode, exp, target);
16895 /* Handle simple no-argument operations. */
16896 d = bdesc_0arg;
16897 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16898 if (d->code == fcode)
16899 return rs6000_expand_zeroop_builtin (icode, target);
16901 gcc_unreachable ();
16904 /* Create a builtin vector type with a name, taking care not to give
16905 the canonical type a name. */
16907 static tree
16908 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16910 tree result = build_vector_type (elt_type, num_elts);
16912 /* Copy so we don't give the canonical type a name. */
16913 result = build_variant_type_copy (result);
16915 add_builtin_type (name, result);
16917 return result;
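/* Typical use, as seen below in rs6000_init_builtins:

     V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);

   The returned node is a named variant, so the name "__vector float" shows
   up in debug info and diagnostics while the unnamed canonical V4SF type
   stays shared.  */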
16920 static void
16921 rs6000_init_builtins (void)
16923 tree tdecl;
16924 tree ftype;
16925 machine_mode mode;
16927 if (TARGET_DEBUG_BUILTIN)
16928 fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
16929 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
16930 (TARGET_ALTIVEC) ? ", altivec" : "",
16931 (TARGET_VSX) ? ", vsx" : "");
16933 V2SI_type_node = build_vector_type (intSI_type_node, 2);
16934 V2SF_type_node = build_vector_type (float_type_node, 2);
16935 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16936 : "__vector long long",
16937 intDI_type_node, 2);
16938 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16939 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16940 intSI_type_node, 4);
16941 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16942 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16943 intHI_type_node, 8);
16944 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16945 intQI_type_node, 16);
16947 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16948 unsigned_intQI_type_node, 16);
16949 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16950 unsigned_intHI_type_node, 8);
16951 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16952 unsigned_intSI_type_node, 4);
16953 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16954 ? "__vector unsigned long"
16955 : "__vector unsigned long long",
16956 unsigned_intDI_type_node, 2);
16958 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
16959 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
16960 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
16961 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16963 const_str_type_node
16964 = build_pointer_type (build_qualified_type (char_type_node,
16965 TYPE_QUAL_CONST));
16967 /* We use V1TI mode as a special container to hold __int128_t items that
16968 must live in VSX registers. */
16969 if (intTI_type_node)
16971 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16972 intTI_type_node, 1);
16973 unsigned_V1TI_type_node
16974 = rs6000_vector_type ("__vector unsigned __int128",
16975 unsigned_intTI_type_node, 1);
16978 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16979 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16980 'vector unsigned short'. */
16982 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16983 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16984 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16985 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16986 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16988 long_integer_type_internal_node = long_integer_type_node;
16989 long_unsigned_type_internal_node = long_unsigned_type_node;
16990 long_long_integer_type_internal_node = long_long_integer_type_node;
16991 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16992 intQI_type_internal_node = intQI_type_node;
16993 uintQI_type_internal_node = unsigned_intQI_type_node;
16994 intHI_type_internal_node = intHI_type_node;
16995 uintHI_type_internal_node = unsigned_intHI_type_node;
16996 intSI_type_internal_node = intSI_type_node;
16997 uintSI_type_internal_node = unsigned_intSI_type_node;
16998 intDI_type_internal_node = intDI_type_node;
16999 uintDI_type_internal_node = unsigned_intDI_type_node;
17000 intTI_type_internal_node = intTI_type_node;
17001 uintTI_type_internal_node = unsigned_intTI_type_node;
17002 float_type_internal_node = float_type_node;
17003 double_type_internal_node = double_type_node;
17004 long_double_type_internal_node = long_double_type_node;
17005 dfloat64_type_internal_node = dfloat64_type_node;
17006 dfloat128_type_internal_node = dfloat128_type_node;
17007 void_type_internal_node = void_type_node;
17009 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
17010 IFmode is the IBM extended 128-bit format that is a pair of doubles.
17011 TFmode will be either IEEE 128-bit floating point or the IBM double-double
17012 format that uses a pair of doubles, depending on the switches and
17013 defaults.
17015 If we don't support either 128-bit IBM double double or IEEE 128-bit
17016 floating point, we need to make sure the type is non-zero, or else the
17017 self-test fails during bootstrap.
17019 We don't register a built-in type for __ibm128 if the type is the same as
17020 long double. Instead, rs6000_cpu_cpp_builtins adds a #define mapping
17021 __ibm128 to long double.
17023 For IEEE 128-bit floating point, always create the type __ieee128. If the
17024 user used -mfloat128, rs6000-c.c will create a define from __float128 to
17025 __ieee128. */
17026 if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
17028 ibm128_float_type_node = make_node (REAL_TYPE);
17029 TYPE_PRECISION (ibm128_float_type_node) = 128;
17030 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
17031 layout_type (ibm128_float_type_node);
17033 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
17034 "__ibm128");
17036 else
17037 ibm128_float_type_node = long_double_type_node;
17039 if (TARGET_FLOAT128_TYPE)
17041 ieee128_float_type_node = float128_type_node;
17042 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
17043 "__ieee128");
17046 else
17047 ieee128_float_type_node = long_double_type_node;
17049 /* Initialize the modes for builtin_function_type, mapping a machine mode to
17050 tree type node. */
17051 builtin_mode_to_type[QImode][0] = integer_type_node;
17052 builtin_mode_to_type[HImode][0] = integer_type_node;
17053 builtin_mode_to_type[SImode][0] = intSI_type_node;
17054 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
17055 builtin_mode_to_type[DImode][0] = intDI_type_node;
17056 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
17057 builtin_mode_to_type[TImode][0] = intTI_type_node;
17058 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
17059 builtin_mode_to_type[SFmode][0] = float_type_node;
17060 builtin_mode_to_type[DFmode][0] = double_type_node;
17061 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
17062 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
17063 builtin_mode_to_type[TFmode][0] = long_double_type_node;
17064 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
17065 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
17066 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
17067 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
17068 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
17069 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
17070 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
17071 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
17072 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
17073 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
17074 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
17075 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
17076 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
17077 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
17078 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
17079 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
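/* Example lookup: builtin_mode_to_type[V4SImode][1] yields
   unsigned_V4SI_type_node (index 0 holds the signed flavor), which is how
   builtin_function_type maps an insn operand's machine mode back to a tree
   type when constructing built-in signatures.  */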
17081 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
17082 TYPE_NAME (bool_char_type_node) = tdecl;
17084 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
17085 TYPE_NAME (bool_short_type_node) = tdecl;
17087 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
17088 TYPE_NAME (bool_int_type_node) = tdecl;
17090 tdecl = add_builtin_type ("__pixel", pixel_type_node);
17091 TYPE_NAME (pixel_type_node) = tdecl;
17093 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
17094 bool_char_type_node, 16);
17095 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
17096 bool_short_type_node, 8);
17097 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
17098 bool_int_type_node, 4);
17099 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
17100 ? "__vector __bool long"
17101 : "__vector __bool long long",
17102 bool_long_type_node, 2);
17103 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
17104 pixel_type_node, 8);
17106 /* Paired builtins are only available if the compiler is built with the
17107 appropriate options, so only create them when that compiler option is
17108 enabled. Create Altivec and VSX builtins on machines with at
17109 least the general purpose extensions (970 and newer) to allow the use of
17110 the target attribute. */
17111 if (TARGET_PAIRED_FLOAT)
17112 paired_init_builtins ();
17113 if (TARGET_EXTRA_BUILTINS)
17114 altivec_init_builtins ();
17115 if (TARGET_HTM)
17116 htm_init_builtins ();
17118 if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
17119 rs6000_common_init_builtins ();
17121 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
17122 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
17123 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
17125 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
17126 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
17127 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
17129 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
17130 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
17131 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
17133 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
17134 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
17135 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
17137 mode = (TARGET_64BIT) ? DImode : SImode;
17138 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
17139 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
17140 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
17142 ftype = build_function_type_list (unsigned_intDI_type_node,
17143 NULL_TREE);
17144 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
17146 if (TARGET_64BIT)
17147 ftype = build_function_type_list (unsigned_intDI_type_node,
17148 NULL_TREE);
17149 else
17150 ftype = build_function_type_list (unsigned_intSI_type_node,
17151 NULL_TREE);
17152 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
17154 ftype = build_function_type_list (double_type_node, NULL_TREE);
17155 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
17157 ftype = build_function_type_list (void_type_node,
17158 intSI_type_node, double_type_node,
17159 NULL_TREE);
17160 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
17162 ftype = build_function_type_list (void_type_node, NULL_TREE);
17163 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
17165 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
17166 NULL_TREE);
17167 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
17168 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
17170 /* AIX libm provides clog as __clog. */
17171 if (TARGET_XCOFF
17172 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
17173 set_user_assembler_name (tdecl, "__clog");
17175 #ifdef SUBTARGET_INIT_BUILTINS
17176 SUBTARGET_INIT_BUILTINS;
17177 #endif
17180 /* Returns the rs6000 builtin decl for CODE. */
17182 static tree
17183 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
17185 HOST_WIDE_INT fnmask;
17187 if (code >= RS6000_BUILTIN_COUNT)
17188 return error_mark_node;
17190 fnmask = rs6000_builtin_info[code].mask;
17191 if ((fnmask & rs6000_builtin_mask) != fnmask)
17193 rs6000_invalid_builtin ((enum rs6000_builtins)code);
17194 return error_mark_node;
17197 return rs6000_builtin_decls[code];
17200 static void
17201 paired_init_builtins (void)
17203 const struct builtin_description *d;
17204 size_t i;
17205 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17207 tree int_ftype_int_v2sf_v2sf
17208 = build_function_type_list (integer_type_node,
17209 integer_type_node,
17210 V2SF_type_node,
17211 V2SF_type_node,
17212 NULL_TREE);
17213 tree pcfloat_type_node =
17214 build_pointer_type (build_qualified_type
17215 (float_type_node, TYPE_QUAL_CONST));
17217 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
17218 long_integer_type_node,
17219 pcfloat_type_node,
17220 NULL_TREE);
17221 tree void_ftype_v2sf_long_pcfloat =
17222 build_function_type_list (void_type_node,
17223 V2SF_type_node,
17224 long_integer_type_node,
17225 pcfloat_type_node,
17226 NULL_TREE);
17229 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
17230 PAIRED_BUILTIN_LX);
17233 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
17234 PAIRED_BUILTIN_STX);
17236 /* Predicates. */
17237 d = bdesc_paired_preds;
17238 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
17240 tree type;
17241 HOST_WIDE_INT mask = d->mask;
17243 if ((mask & builtin_mask) != mask)
17245 if (TARGET_DEBUG_BUILTIN)
17246 fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
17247 d->name);
17248 continue;
17251 /* Cannot define builtin if the instruction is disabled. */
17252 gcc_assert (d->icode != CODE_FOR_nothing);
17254 if (TARGET_DEBUG_BUILTIN)
17255 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
17256 (int)i, get_insn_name (d->icode), (int)d->icode,
17257 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
17259 switch (insn_data[d->icode].operand[1].mode)
17261 case E_V2SFmode:
17262 type = int_ftype_int_v2sf_v2sf;
17263 break;
17264 default:
17265 gcc_unreachable ();
17268 def_builtin (d->name, type, d->code);
17272 static void
17273 altivec_init_builtins (void)
17275 const struct builtin_description *d;
17276 size_t i;
17277 tree ftype;
17278 tree decl;
17279 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17281 tree pvoid_type_node = build_pointer_type (void_type_node);
17283 tree pcvoid_type_node
17284 = build_pointer_type (build_qualified_type (void_type_node,
17285 TYPE_QUAL_CONST));
17287 tree int_ftype_opaque
17288 = build_function_type_list (integer_type_node,
17289 opaque_V4SI_type_node, NULL_TREE);
17290 tree opaque_ftype_opaque
17291 = build_function_type_list (integer_type_node, NULL_TREE);
17292 tree opaque_ftype_opaque_int
17293 = build_function_type_list (opaque_V4SI_type_node,
17294 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
17295 tree opaque_ftype_opaque_opaque_int
17296 = build_function_type_list (opaque_V4SI_type_node,
17297 opaque_V4SI_type_node, opaque_V4SI_type_node,
17298 integer_type_node, NULL_TREE);
17299 tree opaque_ftype_opaque_opaque_opaque
17300 = build_function_type_list (opaque_V4SI_type_node,
17301 opaque_V4SI_type_node, opaque_V4SI_type_node,
17302 opaque_V4SI_type_node, NULL_TREE);
17303 tree opaque_ftype_opaque_opaque
17304 = build_function_type_list (opaque_V4SI_type_node,
17305 opaque_V4SI_type_node, opaque_V4SI_type_node,
17306 NULL_TREE);
17307 tree int_ftype_int_opaque_opaque
17308 = build_function_type_list (integer_type_node,
17309 integer_type_node, opaque_V4SI_type_node,
17310 opaque_V4SI_type_node, NULL_TREE);
17311 tree int_ftype_int_v4si_v4si
17312 = build_function_type_list (integer_type_node,
17313 integer_type_node, V4SI_type_node,
17314 V4SI_type_node, NULL_TREE);
17315 tree int_ftype_int_v2di_v2di
17316 = build_function_type_list (integer_type_node,
17317 integer_type_node, V2DI_type_node,
17318 V2DI_type_node, NULL_TREE);
17319 tree void_ftype_v4si
17320 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
17321 tree v8hi_ftype_void
17322 = build_function_type_list (V8HI_type_node, NULL_TREE);
17323 tree void_ftype_void
17324 = build_function_type_list (void_type_node, NULL_TREE);
17325 tree void_ftype_int
17326 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
17328 tree opaque_ftype_long_pcvoid
17329 = build_function_type_list (opaque_V4SI_type_node,
17330 long_integer_type_node, pcvoid_type_node,
17331 NULL_TREE);
17332 tree v16qi_ftype_long_pcvoid
17333 = build_function_type_list (V16QI_type_node,
17334 long_integer_type_node, pcvoid_type_node,
17335 NULL_TREE);
17336 tree v8hi_ftype_long_pcvoid
17337 = build_function_type_list (V8HI_type_node,
17338 long_integer_type_node, pcvoid_type_node,
17339 NULL_TREE);
17340 tree v4si_ftype_long_pcvoid
17341 = build_function_type_list (V4SI_type_node,
17342 long_integer_type_node, pcvoid_type_node,
17343 NULL_TREE);
17344 tree v4sf_ftype_long_pcvoid
17345 = build_function_type_list (V4SF_type_node,
17346 long_integer_type_node, pcvoid_type_node,
17347 NULL_TREE);
17348 tree v2df_ftype_long_pcvoid
17349 = build_function_type_list (V2DF_type_node,
17350 long_integer_type_node, pcvoid_type_node,
17351 NULL_TREE);
17352 tree v2di_ftype_long_pcvoid
17353 = build_function_type_list (V2DI_type_node,
17354 long_integer_type_node, pcvoid_type_node,
17355 NULL_TREE);
17357 tree void_ftype_opaque_long_pvoid
17358 = build_function_type_list (void_type_node,
17359 opaque_V4SI_type_node, long_integer_type_node,
17360 pvoid_type_node, NULL_TREE);
17361 tree void_ftype_v4si_long_pvoid
17362 = build_function_type_list (void_type_node,
17363 V4SI_type_node, long_integer_type_node,
17364 pvoid_type_node, NULL_TREE);
17365 tree void_ftype_v16qi_long_pvoid
17366 = build_function_type_list (void_type_node,
17367 V16QI_type_node, long_integer_type_node,
17368 pvoid_type_node, NULL_TREE);
17370 tree void_ftype_v16qi_pvoid_long
17371 = build_function_type_list (void_type_node,
17372 V16QI_type_node, pvoid_type_node,
17373 long_integer_type_node, NULL_TREE);
17375 tree void_ftype_v8hi_long_pvoid
17376 = build_function_type_list (void_type_node,
17377 V8HI_type_node, long_integer_type_node,
17378 pvoid_type_node, NULL_TREE);
17379 tree void_ftype_v4sf_long_pvoid
17380 = build_function_type_list (void_type_node,
17381 V4SF_type_node, long_integer_type_node,
17382 pvoid_type_node, NULL_TREE);
17383 tree void_ftype_v2df_long_pvoid
17384 = build_function_type_list (void_type_node,
17385 V2DF_type_node, long_integer_type_node,
17386 pvoid_type_node, NULL_TREE);
17387 tree void_ftype_v2di_long_pvoid
17388 = build_function_type_list (void_type_node,
17389 V2DI_type_node, long_integer_type_node,
17390 pvoid_type_node, NULL_TREE);
17391 tree int_ftype_int_v8hi_v8hi
17392 = build_function_type_list (integer_type_node,
17393 integer_type_node, V8HI_type_node,
17394 V8HI_type_node, NULL_TREE);
17395 tree int_ftype_int_v16qi_v16qi
17396 = build_function_type_list (integer_type_node,
17397 integer_type_node, V16QI_type_node,
17398 V16QI_type_node, NULL_TREE);
17399 tree int_ftype_int_v4sf_v4sf
17400 = build_function_type_list (integer_type_node,
17401 integer_type_node, V4SF_type_node,
17402 V4SF_type_node, NULL_TREE);
17403 tree int_ftype_int_v2df_v2df
17404 = build_function_type_list (integer_type_node,
17405 integer_type_node, V2DF_type_node,
17406 V2DF_type_node, NULL_TREE);
17407 tree v2di_ftype_v2di
17408 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17409 tree v4si_ftype_v4si
17410 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17411 tree v8hi_ftype_v8hi
17412 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17413 tree v16qi_ftype_v16qi
17414 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17415 tree v4sf_ftype_v4sf
17416 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17417 tree v2df_ftype_v2df
17418 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17419 tree void_ftype_pcvoid_int_int
17420 = build_function_type_list (void_type_node,
17421 pcvoid_type_node, integer_type_node,
17422 integer_type_node, NULL_TREE);
17424 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17425 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17426 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17427 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17428 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17429 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17430 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17431 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17432 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17433 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17434 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17435 ALTIVEC_BUILTIN_LVXL_V2DF);
17436 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17437 ALTIVEC_BUILTIN_LVXL_V2DI);
17438 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17439 ALTIVEC_BUILTIN_LVXL_V4SF);
17440 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17441 ALTIVEC_BUILTIN_LVXL_V4SI);
17442 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17443 ALTIVEC_BUILTIN_LVXL_V8HI);
17444 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17445 ALTIVEC_BUILTIN_LVXL_V16QI);
17446 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17447 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17448 ALTIVEC_BUILTIN_LVX_V2DF);
17449 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17450 ALTIVEC_BUILTIN_LVX_V2DI);
17451 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17452 ALTIVEC_BUILTIN_LVX_V4SF);
17453 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17454 ALTIVEC_BUILTIN_LVX_V4SI);
17455 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17456 ALTIVEC_BUILTIN_LVX_V8HI);
17457 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17458 ALTIVEC_BUILTIN_LVX_V16QI);
17459 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17460 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17461 ALTIVEC_BUILTIN_STVX_V2DF);
17462 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17463 ALTIVEC_BUILTIN_STVX_V2DI);
17464 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17465 ALTIVEC_BUILTIN_STVX_V4SF);
17466 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17467 ALTIVEC_BUILTIN_STVX_V4SI);
17468 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17469 ALTIVEC_BUILTIN_STVX_V8HI);
17470 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17471 ALTIVEC_BUILTIN_STVX_V16QI);
17472 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17473 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17474 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17475 ALTIVEC_BUILTIN_STVXL_V2DF);
17476 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17477 ALTIVEC_BUILTIN_STVXL_V2DI);
17478 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17479 ALTIVEC_BUILTIN_STVXL_V4SF);
17480 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17481 ALTIVEC_BUILTIN_STVXL_V4SI);
17482 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17483 ALTIVEC_BUILTIN_STVXL_V8HI);
17484 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17485 ALTIVEC_BUILTIN_STVXL_V16QI);
17486 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17487 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17488 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17489 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17490 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17491 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17492 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17493 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17494 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17495 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17496 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17497 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17498 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17499 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17500 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17501 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17503 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17504 VSX_BUILTIN_LXVD2X_V2DF);
17505 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17506 VSX_BUILTIN_LXVD2X_V2DI);
17507 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17508 VSX_BUILTIN_LXVW4X_V4SF);
17509 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17510 VSX_BUILTIN_LXVW4X_V4SI);
17511 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17512 VSX_BUILTIN_LXVW4X_V8HI);
17513 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17514 VSX_BUILTIN_LXVW4X_V16QI);
17515 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17516 VSX_BUILTIN_STXVD2X_V2DF);
17517 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17518 VSX_BUILTIN_STXVD2X_V2DI);
17519 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17520 VSX_BUILTIN_STXVW4X_V4SF);
17521 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17522 VSX_BUILTIN_STXVW4X_V4SI);
17523 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17524 VSX_BUILTIN_STXVW4X_V8HI);
17525 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17526 VSX_BUILTIN_STXVW4X_V16QI);
17528 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17529 VSX_BUILTIN_LD_ELEMREV_V2DF);
17530 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17531 VSX_BUILTIN_LD_ELEMREV_V2DI);
17532 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17533 VSX_BUILTIN_LD_ELEMREV_V4SF);
17534 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17535 VSX_BUILTIN_LD_ELEMREV_V4SI);
17536 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17537 VSX_BUILTIN_LD_ELEMREV_V8HI);
17538 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17539 VSX_BUILTIN_LD_ELEMREV_V16QI);
17540 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17541 VSX_BUILTIN_ST_ELEMREV_V2DF);
17542 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17543 VSX_BUILTIN_ST_ELEMREV_V2DI);
17544 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17545 VSX_BUILTIN_ST_ELEMREV_V4SF);
17546 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17547 VSX_BUILTIN_ST_ELEMREV_V4SI);
17548 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17549 VSX_BUILTIN_ST_ELEMREV_V8HI);
17550 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17551 VSX_BUILTIN_ST_ELEMREV_V16QI);
17553 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17554 VSX_BUILTIN_VEC_LD);
17555 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17556 VSX_BUILTIN_VEC_ST);
17557 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17558 VSX_BUILTIN_VEC_XL);
17559 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17560 VSX_BUILTIN_VEC_XL_BE);
17561 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17562 VSX_BUILTIN_VEC_XST);
17563 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17564 VSX_BUILTIN_VEC_XST_BE);
17566 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17567 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17568 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17570 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17571 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17572 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17573 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17574 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17575 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17576 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17577 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17578 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17579 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17580 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17581 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17583 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17584 ALTIVEC_BUILTIN_VEC_ADDE);
17585 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17586 ALTIVEC_BUILTIN_VEC_ADDEC);
17587 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17588 ALTIVEC_BUILTIN_VEC_CMPNE);
17589 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17590 ALTIVEC_BUILTIN_VEC_MUL);
17591 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17592 ALTIVEC_BUILTIN_VEC_SUBE);
17593 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17594 ALTIVEC_BUILTIN_VEC_SUBEC);
17596 /* Cell builtins. */
17597 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17598 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17599 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17600 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17602 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17603 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17604 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17605 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17607 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17608 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17609 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17610 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17612 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17613 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17614 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17615 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17617 if (TARGET_P9_VECTOR)
17619 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17620 P9V_BUILTIN_STXVL);
17621 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17622 P9V_BUILTIN_XST_LEN_R);
17625 /* Add the DST variants. */
17626 d = bdesc_dst;
17627 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17629 HOST_WIDE_INT mask = d->mask;
17631 /* It is expected that these dst built-in functions may have
17632 d->icode equal to CODE_FOR_nothing. */
17633 if ((mask & builtin_mask) != mask)
17635 if (TARGET_DEBUG_BUILTIN)
17636 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17637 d->name);
17638 continue;
17640 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17643 /* Initialize the predicates. */
17644 d = bdesc_altivec_preds;
17645 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17647 machine_mode mode1;
17648 tree type;
17649 HOST_WIDE_INT mask = d->mask;
17651 if ((mask & builtin_mask) != mask)
17653 if (TARGET_DEBUG_BUILTIN)
17654 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17655 d->name);
17656 continue;
17659 if (rs6000_overloaded_builtin_p (d->code))
17660 mode1 = VOIDmode;
17661 else
17663 /* Cannot define builtin if the instruction is disabled. */
17664 gcc_assert (d->icode != CODE_FOR_nothing);
17665 mode1 = insn_data[d->icode].operand[1].mode;
17668 switch (mode1)
17670 case E_VOIDmode:
17671 type = int_ftype_int_opaque_opaque;
17672 break;
17673 case E_V2DImode:
17674 type = int_ftype_int_v2di_v2di;
17675 break;
17676 case E_V4SImode:
17677 type = int_ftype_int_v4si_v4si;
17678 break;
17679 case E_V8HImode:
17680 type = int_ftype_int_v8hi_v8hi;
17681 break;
17682 case E_V16QImode:
17683 type = int_ftype_int_v16qi_v16qi;
17684 break;
17685 case E_V4SFmode:
17686 type = int_ftype_int_v4sf_v4sf;
17687 break;
17688 case E_V2DFmode:
17689 type = int_ftype_int_v2df_v2df;
17690 break;
17691 default:
17692 gcc_unreachable ();
17695 def_builtin (d->name, type, d->code);
17698 /* Initialize the abs* operators. */
17699 d = bdesc_abs;
17700 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17702 machine_mode mode0;
17703 tree type;
17704 HOST_WIDE_INT mask = d->mask;
17706 if ((mask & builtin_mask) != mask)
17708 if (TARGET_DEBUG_BUILTIN)
17709 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17710 d->name);
17711 continue;
17714 /* Cannot define builtin if the instruction is disabled. */
17715 gcc_assert (d->icode != CODE_FOR_nothing);
17716 mode0 = insn_data[d->icode].operand[0].mode;
17718 switch (mode0)
17720 case E_V2DImode:
17721 type = v2di_ftype_v2di;
17722 break;
17723 case E_V4SImode:
17724 type = v4si_ftype_v4si;
17725 break;
17726 case E_V8HImode:
17727 type = v8hi_ftype_v8hi;
17728 break;
17729 case E_V16QImode:
17730 type = v16qi_ftype_v16qi;
17731 break;
17732 case E_V4SFmode:
17733 type = v4sf_ftype_v4sf;
17734 break;
17735 case E_V2DFmode:
17736 type = v2df_ftype_v2df;
17737 break;
17738 default:
17739 gcc_unreachable ();
17742 def_builtin (d->name, type, d->code);
17745 /* Initialize target builtin that implements
17746 targetm.vectorize.builtin_mask_for_load. */
17748 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17749 v16qi_ftype_long_pcvoid,
17750 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17751 BUILT_IN_MD, NULL, NULL_TREE);
17752 TREE_READONLY (decl) = 1;
17753 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17754 altivec_builtin_mask_for_load = decl;
17756 /* Access to the vec_init patterns. */
17757 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17758 integer_type_node, integer_type_node,
17759 integer_type_node, NULL_TREE);
17760 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17762 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17763 short_integer_type_node,
17764 short_integer_type_node,
17765 short_integer_type_node,
17766 short_integer_type_node,
17767 short_integer_type_node,
17768 short_integer_type_node,
17769 short_integer_type_node, NULL_TREE);
17770 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17772 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17773 char_type_node, char_type_node,
17774 char_type_node, char_type_node,
17775 char_type_node, char_type_node,
17776 char_type_node, char_type_node,
17777 char_type_node, char_type_node,
17778 char_type_node, char_type_node,
17779 char_type_node, char_type_node,
17780 char_type_node, NULL_TREE);
17781 def_builtin ("__builtin_vec_init_v16qi", ftype,
17782 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17784 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17785 float_type_node, float_type_node,
17786 float_type_node, NULL_TREE);
17787 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17789 /* VSX builtins. */
17790 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17791 double_type_node, NULL_TREE);
17792 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17794 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17795 intDI_type_node, NULL_TREE);
17796 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17798 /* Access to the vec_set patterns. */
17799 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17800 intSI_type_node,
17801 integer_type_node, NULL_TREE);
17802 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17804 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17805 intHI_type_node,
17806 integer_type_node, NULL_TREE);
17807 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17809 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17810 intQI_type_node,
17811 integer_type_node, NULL_TREE);
17812 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17814 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17815 float_type_node,
17816 integer_type_node, NULL_TREE);
17817 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17819 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17820 double_type_node,
17821 integer_type_node, NULL_TREE);
17822 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17824 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17825 intDI_type_node,
17826 integer_type_node, NULL_TREE);
17827 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17829 /* Access to the vec_extract patterns. */
17830 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17831 integer_type_node, NULL_TREE);
17832 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17834 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17835 integer_type_node, NULL_TREE);
17836 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17838 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17839 integer_type_node, NULL_TREE);
17840 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17842 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17843 integer_type_node, NULL_TREE);
17844 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17846 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17847 integer_type_node, NULL_TREE);
17848 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17850 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17851 integer_type_node, NULL_TREE);
17852 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17855 if (V1TI_type_node)
17857 tree v1ti_ftype_long_pcvoid
17858 = build_function_type_list (V1TI_type_node,
17859 long_integer_type_node, pcvoid_type_node,
17860 NULL_TREE);
17861 tree void_ftype_v1ti_long_pvoid
17862 = build_function_type_list (void_type_node,
17863 V1TI_type_node, long_integer_type_node,
17864 pvoid_type_node, NULL_TREE);
17865 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17866 VSX_BUILTIN_LXVD2X_V1TI);
17867 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17868 VSX_BUILTIN_STXVD2X_V1TI);
17869 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17870 NULL_TREE, NULL_TREE);
17871 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17872 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17873 intTI_type_node,
17874 integer_type_node, NULL_TREE);
17875 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17876 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17877 integer_type_node, NULL_TREE);
17878 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17883 static void
17884 htm_init_builtins (void)
17886 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17887 const struct builtin_description *d;
17888 size_t i;
17890 d = bdesc_htm;
17891 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17893 tree op[MAX_HTM_OPERANDS], type;
17894 HOST_WIDE_INT mask = d->mask;
17895 unsigned attr = rs6000_builtin_info[d->code].attr;
17896 bool void_func = (attr & RS6000_BTC_VOID);
17897 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17898 int nopnds = 0;
17899 tree gpr_type_node;
17900 tree rettype;
17901 tree argtype;
17903 /* It is expected that these htm built-in functions may have
17904 d->icode equal to CODE_FOR_nothing. */
17906 if (TARGET_32BIT && TARGET_POWERPC64)
17907 gpr_type_node = long_long_unsigned_type_node;
17908 else
17909 gpr_type_node = long_unsigned_type_node;
17911 if (attr & RS6000_BTC_SPR)
17913 rettype = gpr_type_node;
17914 argtype = gpr_type_node;
17916 else if (d->code == HTM_BUILTIN_TABORTDC
17917 || d->code == HTM_BUILTIN_TABORTDCI)
17919 rettype = unsigned_type_node;
17920 argtype = gpr_type_node;
17922 else
17924 rettype = unsigned_type_node;
17925 argtype = unsigned_type_node;
17928 if ((mask & builtin_mask) != mask)
17930 if (TARGET_DEBUG_BUILTIN)
17931 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
17932 continue;
17935 if (d->name == 0)
17937 if (TARGET_DEBUG_BUILTIN)
17938 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
17939 (long unsigned) i);
17940 continue;
17943 op[nopnds++] = (void_func) ? void_type_node : rettype;
17945 if (attr_args == RS6000_BTC_UNARY)
17946 op[nopnds++] = argtype;
17947 else if (attr_args == RS6000_BTC_BINARY)
17949 op[nopnds++] = argtype;
17950 op[nopnds++] = argtype;
17952 else if (attr_args == RS6000_BTC_TERNARY)
17954 op[nopnds++] = argtype;
17955 op[nopnds++] = argtype;
17956 op[nopnds++] = argtype;
17959 switch (nopnds)
17961 case 1:
17962 type = build_function_type_list (op[0], NULL_TREE);
17963 break;
17964 case 2:
17965 type = build_function_type_list (op[0], op[1], NULL_TREE);
17966 break;
17967 case 3:
17968 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17969 break;
17970 case 4:
17971 type = build_function_type_list (op[0], op[1], op[2], op[3],
17972 NULL_TREE);
17973 break;
17974 default:
17975 gcc_unreachable ();
17978 def_builtin (d->name, type, d->code);
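/* For example, assuming the usual bdesc_htm table entries: a non-void
   unary builtin such as __builtin_tbegin is given the type
   unsigned int (*) (unsigned int) by the code above, while the
   RS6000_BTC_SPR accessors take and return gpr_type_node, i.e. the full
   GPR width selected a few lines earlier.  */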
17982 /* Hash function for builtin functions with up to 3 arguments and a return
17983 type. */
17984 hashval_t
17985 builtin_hasher::hash (builtin_hash_struct *bh)
17987 unsigned ret = 0;
17988 int i;
17990 for (i = 0; i < 4; i++)
17992 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17993 ret = (ret * 2) + bh->uns_p[i];
17996 return ret;
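/* For illustration, the loop above is a positional encoding of the four
   (mode, uns_p) pairs:

     ret = (((m0 * 2 + u0) * M + m1) * 2 + u1) * M + ...,  M = MAX_MACHINE_MODE

   so distinct signatures can only collide when the value overflows
   hashval_t, and builtin_hasher::equal below still compares all eight
   fields exactly.  */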
17999 /* Compare builtin hash entries H1 and H2 for equivalence. */
18000 bool
18001 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
18003 return ((p1->mode[0] == p2->mode[0])
18004 && (p1->mode[1] == p2->mode[1])
18005 && (p1->mode[2] == p2->mode[2])
18006 && (p1->mode[3] == p2->mode[3])
18007 && (p1->uns_p[0] == p2->uns_p[0])
18008 && (p1->uns_p[1] == p2->uns_p[1])
18009 && (p1->uns_p[2] == p2->uns_p[2])
18010 && (p1->uns_p[3] == p2->uns_p[3]));
18013 /* Map types for builtin functions with an explicit return type and up to 3
18014 arguments. Functions with fewer than 3 arguments use VOIDmode as the type
18015 of the argument. */
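/* For example, a two-operand builtin is looked up as
   builtin_function_type (<ret mode>, <arg0 mode>, <arg1 mode>, VOIDmode,
   code, name); the trailing VOIDmode is stripped by the num_args loop in
   the body below.  */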
18016 static tree
18017 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
18018 machine_mode mode_arg1, machine_mode mode_arg2,
18019 enum rs6000_builtins builtin, const char *name)
18021 struct builtin_hash_struct h;
18022 struct builtin_hash_struct *h2;
18023 int num_args = 3;
18024 int i;
18025 tree ret_type = NULL_TREE;
18026 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
18028 /* Create builtin_hash_table. */
18029 if (builtin_hash_table == NULL)
18030 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
18032 h.type = NULL_TREE;
18033 h.mode[0] = mode_ret;
18034 h.mode[1] = mode_arg0;
18035 h.mode[2] = mode_arg1;
18036 h.mode[3] = mode_arg2;
18037 h.uns_p[0] = 0;
18038 h.uns_p[1] = 0;
18039 h.uns_p[2] = 0;
18040 h.uns_p[3] = 0;
18042 /* If the builtin is a type that produces unsigned results or takes unsigned
18043 arguments, and it is returned as a decl for the vectorizer (such as
18044 widening multiplies, permute), make sure the arguments and return value
18045 are type correct. */
18046 switch (builtin)
18048 /* unsigned 1 argument functions. */
18049 case CRYPTO_BUILTIN_VSBOX:
18050 case P8V_BUILTIN_VGBBD:
18051 case MISC_BUILTIN_CDTBCD:
18052 case MISC_BUILTIN_CBCDTD:
18053 h.uns_p[0] = 1;
18054 h.uns_p[1] = 1;
18055 break;
18057 /* unsigned 2 argument functions. */
18058 case ALTIVEC_BUILTIN_VMULEUB:
18059 case ALTIVEC_BUILTIN_VMULEUH:
18060 case ALTIVEC_BUILTIN_VMULEUW:
18061 case ALTIVEC_BUILTIN_VMULOUB:
18062 case ALTIVEC_BUILTIN_VMULOUH:
18063 case ALTIVEC_BUILTIN_VMULOUW:
18064 case CRYPTO_BUILTIN_VCIPHER:
18065 case CRYPTO_BUILTIN_VCIPHERLAST:
18066 case CRYPTO_BUILTIN_VNCIPHER:
18067 case CRYPTO_BUILTIN_VNCIPHERLAST:
18068 case CRYPTO_BUILTIN_VPMSUMB:
18069 case CRYPTO_BUILTIN_VPMSUMH:
18070 case CRYPTO_BUILTIN_VPMSUMW:
18071 case CRYPTO_BUILTIN_VPMSUMD:
18072 case CRYPTO_BUILTIN_VPMSUM:
18073 case MISC_BUILTIN_ADDG6S:
18074 case MISC_BUILTIN_DIVWEU:
18075 case MISC_BUILTIN_DIVWEUO:
18076 case MISC_BUILTIN_DIVDEU:
18077 case MISC_BUILTIN_DIVDEUO:
18078 case VSX_BUILTIN_UDIV_V2DI:
18079 case ALTIVEC_BUILTIN_VMAXUB:
18080 case ALTIVEC_BUILTIN_VMINUB:
18081 case ALTIVEC_BUILTIN_VMAXUH:
18082 case ALTIVEC_BUILTIN_VMINUH:
18083 case ALTIVEC_BUILTIN_VMAXUW:
18084 case ALTIVEC_BUILTIN_VMINUW:
18085 case P8V_BUILTIN_VMAXUD:
18086 case P8V_BUILTIN_VMINUD:
18087 h.uns_p[0] = 1;
18088 h.uns_p[1] = 1;
18089 h.uns_p[2] = 1;
18090 break;
18092 /* unsigned 3 argument functions. */
18093 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
18094 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
18095 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
18096 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
18097 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
18098 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
18099 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
18100 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
18101 case VSX_BUILTIN_VPERM_16QI_UNS:
18102 case VSX_BUILTIN_VPERM_8HI_UNS:
18103 case VSX_BUILTIN_VPERM_4SI_UNS:
18104 case VSX_BUILTIN_VPERM_2DI_UNS:
18105 case VSX_BUILTIN_XXSEL_16QI_UNS:
18106 case VSX_BUILTIN_XXSEL_8HI_UNS:
18107 case VSX_BUILTIN_XXSEL_4SI_UNS:
18108 case VSX_BUILTIN_XXSEL_2DI_UNS:
18109 case CRYPTO_BUILTIN_VPERMXOR:
18110 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
18111 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
18112 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
18113 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
18114 case CRYPTO_BUILTIN_VSHASIGMAW:
18115 case CRYPTO_BUILTIN_VSHASIGMAD:
18116 case CRYPTO_BUILTIN_VSHASIGMA:
18117 h.uns_p[0] = 1;
18118 h.uns_p[1] = 1;
18119 h.uns_p[2] = 1;
18120 h.uns_p[3] = 1;
18121 break;
18123 /* signed permute functions with unsigned char mask. */
18124 case ALTIVEC_BUILTIN_VPERM_16QI:
18125 case ALTIVEC_BUILTIN_VPERM_8HI:
18126 case ALTIVEC_BUILTIN_VPERM_4SI:
18127 case ALTIVEC_BUILTIN_VPERM_4SF:
18128 case ALTIVEC_BUILTIN_VPERM_2DI:
18129 case ALTIVEC_BUILTIN_VPERM_2DF:
18130 case VSX_BUILTIN_VPERM_16QI:
18131 case VSX_BUILTIN_VPERM_8HI:
18132 case VSX_BUILTIN_VPERM_4SI:
18133 case VSX_BUILTIN_VPERM_4SF:
18134 case VSX_BUILTIN_VPERM_2DI:
18135 case VSX_BUILTIN_VPERM_2DF:
18136 h.uns_p[3] = 1;
18137 break;
18139 /* unsigned args, signed return. */
18140 case VSX_BUILTIN_XVCVUXDSP:
18141 case VSX_BUILTIN_XVCVUXDDP_UNS:
18142 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
18143 h.uns_p[1] = 1;
18144 break;
18146 /* signed args, unsigned return. */
18147 case VSX_BUILTIN_XVCVDPUXDS_UNS:
18148 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
18149 case MISC_BUILTIN_UNPACK_TD:
18150 case MISC_BUILTIN_UNPACK_V1TI:
18151 h.uns_p[0] = 1;
18152 break;
18154 /* unsigned arguments, bool return (compares). */
18155 case ALTIVEC_BUILTIN_VCMPEQUB:
18156 case ALTIVEC_BUILTIN_VCMPEQUH:
18157 case ALTIVEC_BUILTIN_VCMPEQUW:
18158 case P8V_BUILTIN_VCMPEQUD:
18159 case VSX_BUILTIN_CMPGE_U16QI:
18160 case VSX_BUILTIN_CMPGE_U8HI:
18161 case VSX_BUILTIN_CMPGE_U4SI:
18162 case VSX_BUILTIN_CMPGE_U2DI:
18163 case ALTIVEC_BUILTIN_VCMPGTUB:
18164 case ALTIVEC_BUILTIN_VCMPGTUH:
18165 case ALTIVEC_BUILTIN_VCMPGTUW:
18166 case P8V_BUILTIN_VCMPGTUD:
18167 h.uns_p[1] = 1;
18168 h.uns_p[2] = 1;
18169 break;
18171 /* unsigned arguments for 128-bit pack instructions. */
18172 case MISC_BUILTIN_PACK_TD:
18173 case MISC_BUILTIN_PACK_V1TI:
18174 h.uns_p[1] = 1;
18175 h.uns_p[2] = 1;
18176 break;
18178 /* unsigned second arguments (vector shift right). */
18179 case ALTIVEC_BUILTIN_VSRB:
18180 case ALTIVEC_BUILTIN_VSRH:
18181 case ALTIVEC_BUILTIN_VSRW:
18182 case P8V_BUILTIN_VSRD:
18183 h.uns_p[2] = 1;
18184 break;
18186 default:
18187 break;
18190 /* Figure out how many args are present. */
18191 while (num_args > 0 && h.mode[num_args] == VOIDmode)
18192 num_args--;
18194 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
18195 if (!ret_type && h.uns_p[0])
18196 ret_type = builtin_mode_to_type[h.mode[0]][0];
18198 if (!ret_type)
18199 fatal_error (input_location,
18200 "internal error: builtin function %qs had an unexpected "
18201 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
18203 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
18204 arg_type[i] = NULL_TREE;
18206 for (i = 0; i < num_args; i++)
18208 int m = (int) h.mode[i+1];
18209 int uns_p = h.uns_p[i+1];
18211 arg_type[i] = builtin_mode_to_type[m][uns_p];
18212 if (!arg_type[i] && uns_p)
18213 arg_type[i] = builtin_mode_to_type[m][0];
18215 if (!arg_type[i])
18216 fatal_error (input_location,
18217 "internal error: builtin function %qs, argument %d "
18218 "had unexpected argument type %qs", name, i,
18219 GET_MODE_NAME (m));
18222 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
18223 if (*found == NULL)
18225 h2 = ggc_alloc<builtin_hash_struct> ();
18226 *h2 = h;
18227 *found = h2;
18229 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
18230 arg_type[2], NULL_TREE);
18233 return (*found)->type;
18236 static void
18237 rs6000_common_init_builtins (void)
18239 const struct builtin_description *d;
18240 size_t i;
18242 tree opaque_ftype_opaque = NULL_TREE;
18243 tree opaque_ftype_opaque_opaque = NULL_TREE;
18244 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
18245 tree v2si_ftype = NULL_TREE;
18246 tree v2si_ftype_qi = NULL_TREE;
18247 tree v2si_ftype_v2si_qi = NULL_TREE;
18248 tree v2si_ftype_int_qi = NULL_TREE;
18249 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18251 if (TARGET_PAIRED_FLOAT)
18253 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
18254 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
18257 /* Paired builtins are only available if you build a compiler with the
18258 appropriate options, so only create those builtins with the appropriate
18259 compiler option. Create Altivec and VSX builtins on machines with at
18260 least the general purpose extensions (970 and newer) to allow the use of
18261 the target attribute. */
18263 if (TARGET_EXTRA_BUILTINS)
18264 builtin_mask |= RS6000_BTM_COMMON;
18266 /* Add the ternary operators. */
18267 d = bdesc_3arg;
18268 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
18270 tree type;
18271 HOST_WIDE_INT mask = d->mask;
18273 if ((mask & builtin_mask) != mask)
18275 if (TARGET_DEBUG_BUILTIN)
18276 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
18277 continue;
18280 if (rs6000_overloaded_builtin_p (d->code))
18282 if (! (type = opaque_ftype_opaque_opaque_opaque))
18283 type = opaque_ftype_opaque_opaque_opaque
18284 = build_function_type_list (opaque_V4SI_type_node,
18285 opaque_V4SI_type_node,
18286 opaque_V4SI_type_node,
18287 opaque_V4SI_type_node,
18288 NULL_TREE);
18290 else
18292 enum insn_code icode = d->icode;
18293 if (d->name == 0)
18295 if (TARGET_DEBUG_BUILTIN)
18296 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
18297 (long unsigned)i);
18299 continue;
18302 if (icode == CODE_FOR_nothing)
18304 if (TARGET_DEBUG_BUILTIN)
18305 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
18306 d->name);
18308 continue;
18311 type = builtin_function_type (insn_data[icode].operand[0].mode,
18312 insn_data[icode].operand[1].mode,
18313 insn_data[icode].operand[2].mode,
18314 insn_data[icode].operand[3].mode,
18315 d->code, d->name);
18318 def_builtin (d->name, type, d->code);
18321 /* Add the binary operators. */
18322 d = bdesc_2arg;
18323 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
18325 machine_mode mode0, mode1, mode2;
18326 tree type;
18327 HOST_WIDE_INT mask = d->mask;
18329 if ((mask & builtin_mask) != mask)
18331 if (TARGET_DEBUG_BUILTIN)
18332 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
18333 continue;
18336 if (rs6000_overloaded_builtin_p (d->code))
18338 if (! (type = opaque_ftype_opaque_opaque))
18339 type = opaque_ftype_opaque_opaque
18340 = build_function_type_list (opaque_V4SI_type_node,
18341 opaque_V4SI_type_node,
18342 opaque_V4SI_type_node,
18343 NULL_TREE);
18345 else
18347 enum insn_code icode = d->icode;
18348 if (d->name == 0)
18350 if (TARGET_DEBUG_BUILTIN)
18351 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
18352 (long unsigned)i);
18354 continue;
18357 if (icode == CODE_FOR_nothing)
18359 if (TARGET_DEBUG_BUILTIN)
18360 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
18361 d->name);
18363 continue;
18366 mode0 = insn_data[icode].operand[0].mode;
18367 mode1 = insn_data[icode].operand[1].mode;
18368 mode2 = insn_data[icode].operand[2].mode;
18370 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
18372 if (! (type = v2si_ftype_v2si_qi))
18373 type = v2si_ftype_v2si_qi
18374 = build_function_type_list (opaque_V2SI_type_node,
18375 opaque_V2SI_type_node,
18376 char_type_node,
18377 NULL_TREE);
18380 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
18381 && mode2 == QImode)
18383 if (! (type = v2si_ftype_int_qi))
18384 type = v2si_ftype_int_qi
18385 = build_function_type_list (opaque_V2SI_type_node,
18386 integer_type_node,
18387 char_type_node,
18388 NULL_TREE);
18391 else
18392 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
18393 d->code, d->name);
18396 def_builtin (d->name, type, d->code);
18399 /* Add the simple unary operators. */
18400 d = bdesc_1arg;
18401 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18403 machine_mode mode0, mode1;
18404 tree type;
18405 HOST_WIDE_INT mask = d->mask;
18407 if ((mask & builtin_mask) != mask)
18409 if (TARGET_DEBUG_BUILTIN)
18410 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18411 continue;
18414 if (rs6000_overloaded_builtin_p (d->code))
18416 if (! (type = opaque_ftype_opaque))
18417 type = opaque_ftype_opaque
18418 = build_function_type_list (opaque_V4SI_type_node,
18419 opaque_V4SI_type_node,
18420 NULL_TREE);
18422 else
18424 enum insn_code icode = d->icode;
18425 if (d->name == 0)
18427 if (TARGET_DEBUG_BUILTIN)
18428 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
18429 (long unsigned)i);
18431 continue;
18434 if (icode == CODE_FOR_nothing)
18436 if (TARGET_DEBUG_BUILTIN)
18437 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18438 d->name);
18440 continue;
18443 mode0 = insn_data[icode].operand[0].mode;
18444 mode1 = insn_data[icode].operand[1].mode;
18446 if (mode0 == V2SImode && mode1 == QImode)
18448 if (! (type = v2si_ftype_qi))
18449 type = v2si_ftype_qi
18450 = build_function_type_list (opaque_V2SI_type_node,
18451 char_type_node,
18452 NULL_TREE);
18455 else
18456 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18457 d->code, d->name);
18460 def_builtin (d->name, type, d->code);
18463 /* Add the simple no-argument operators. */
18464 d = bdesc_0arg;
18465 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18467 machine_mode mode0;
18468 tree type;
18469 HOST_WIDE_INT mask = d->mask;
18471 if ((mask & builtin_mask) != mask)
18473 if (TARGET_DEBUG_BUILTIN)
18474 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18475 continue;
18477 if (rs6000_overloaded_builtin_p (d->code))
18479 if (!opaque_ftype_opaque)
18480 opaque_ftype_opaque
18481 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18482 type = opaque_ftype_opaque;
18484 else
18486 enum insn_code icode = d->icode;
18487 if (d->name == 0)
18489 if (TARGET_DEBUG_BUILTIN)
18490 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18491 (long unsigned) i);
18492 continue;
18494 if (icode == CODE_FOR_nothing)
18496 if (TARGET_DEBUG_BUILTIN)
18497 fprintf (stderr,
18498 "rs6000_builtin, skip no-argument %s (no code)\n",
18499 d->name);
18500 continue;
18502 mode0 = insn_data[icode].operand[0].mode;
18503 if (mode0 == V2SImode)
18505 /* code for paired single */
18506 if (! (type = v2si_ftype))
18508 v2si_ftype
18509 = build_function_type_list (opaque_V2SI_type_node,
18510 NULL_TREE);
18511 type = v2si_ftype;
18514 else
18515 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18516 d->code, d->name);
18518 def_builtin (d->name, type, d->code);
18522 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18523 static void
18524 init_float128_ibm (machine_mode mode)
18526 if (!TARGET_XL_COMPAT)
18528 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18529 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18530 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18531 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18533 if (!TARGET_HARD_FLOAT)
18535 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18536 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18537 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18538 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18539 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18540 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18541 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18542 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18544 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18545 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18546 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18547 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18548 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18549 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18550 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18551 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18554 else
18556 set_optab_libfunc (add_optab, mode, "_xlqadd");
18557 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18558 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18559 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18562 /* Add various conversions for IFmode to use the traditional TFmode
18563 names. */
18564 if (mode == IFmode)
18566 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
18567 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
18568 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
18569 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
18570 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
18571 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
18573 if (TARGET_POWERPC64)
18575 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18576 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18577 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18578 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18583 /* Set up IEEE 128-bit floating point routines. Use different names if the
18584 arguments can be passed in a vector register. The historical PowerPC
18585 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18586 continue to use that if we aren't using vector registers to pass IEEE
18587 128-bit floating point. */
18589 static void
18590 init_float128_ieee (machine_mode mode)
18592 if (FLOAT128_VECTOR_P (mode))
18594 set_optab_libfunc (add_optab, mode, "__addkf3");
18595 set_optab_libfunc (sub_optab, mode, "__subkf3");
18596 set_optab_libfunc (neg_optab, mode, "__negkf2");
18597 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18598 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18599 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18600 set_optab_libfunc (abs_optab, mode, "__abstkf2");
18602 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18603 set_optab_libfunc (ne_optab, mode, "__nekf2");
18604 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18605 set_optab_libfunc (ge_optab, mode, "__gekf2");
18606 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18607 set_optab_libfunc (le_optab, mode, "__lekf2");
18608 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18610 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18611 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18612 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18613 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18615 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
18616 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18617 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
18619 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
18620 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18621 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
18623 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
18624 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
18625 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
18626 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
18627 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
18628 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
18630 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18631 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18632 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18633 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18635 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18636 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18637 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18638 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18640 if (TARGET_POWERPC64)
18642 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18643 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18644 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18645 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18649 else
18651 set_optab_libfunc (add_optab, mode, "_q_add");
18652 set_optab_libfunc (sub_optab, mode, "_q_sub");
18653 set_optab_libfunc (neg_optab, mode, "_q_neg");
18654 set_optab_libfunc (smul_optab, mode, "_q_mul");
18655 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18656 if (TARGET_PPC_GPOPT)
18657 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18659 set_optab_libfunc (eq_optab, mode, "_q_feq");
18660 set_optab_libfunc (ne_optab, mode, "_q_fne");
18661 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18662 set_optab_libfunc (ge_optab, mode, "_q_fge");
18663 set_optab_libfunc (lt_optab, mode, "_q_flt");
18664 set_optab_libfunc (le_optab, mode, "_q_fle");
18666 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18667 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18668 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18669 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18670 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18671 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18672 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18673 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18677 static void
18678 rs6000_init_libfuncs (void)
18680 /* __float128 support. */
18681 if (TARGET_FLOAT128_TYPE)
18683 init_float128_ibm (IFmode);
18684 init_float128_ieee (KFmode);
18687 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18688 if (TARGET_LONG_DOUBLE_128)
18690 if (!TARGET_IEEEQUAD)
18691 init_float128_ibm (TFmode);
18693 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18694 else
18695 init_float128_ieee (TFmode);
18699 /* Emit a potentially record-form instruction, setting DST from SRC.
18700 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18701 signed comparison of DST with zero. If DOT is 1, the generated RTL
18702 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18703 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18704 a separate COMPARE. */
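/* For illustration, the three RTL shapes emitted below, assuming CCREG
   is CR0:

     dot == 0:  (set DST SRC)
     dot == 1:  (parallel [(set CCREG (compare SRC 0)) (clobber DST)])
     dot == 2:  (parallel [(set CCREG (compare SRC 0)) (set DST SRC)])

   if CCREG is not CR0, the dot forms instead emit the SET and the
   COMPARE as two separate insns.  */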
18706 void
18707 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18709 if (dot == 0)
18711 emit_move_insn (dst, src);
18712 return;
18715 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18717 emit_move_insn (dst, src);
18718 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18719 return;
18722 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18723 if (dot == 1)
18725 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18726 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18728 else
18730 rtx set = gen_rtx_SET (dst, src);
18731 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18736 /* A validation routine: say whether CODE, a condition code, and MODE
18737 match. The other alternatives either don't make sense or should
18738 never be generated. */
18740 void
18741 validate_condition_mode (enum rtx_code code, machine_mode mode)
18743 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18744 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18745 && GET_MODE_CLASS (mode) == MODE_CC);
18747 /* These don't make sense. */
18748 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18749 || mode != CCUNSmode);
18751 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18752 || mode == CCUNSmode);
18754 gcc_assert (mode == CCFPmode
18755 || (code != ORDERED && code != UNORDERED
18756 && code != UNEQ && code != LTGT
18757 && code != UNGT && code != UNLT
18758 && code != UNGE && code != UNLE));
18760 /* These should never be generated except for
18761 flag_finite_math_only. */
18762 gcc_assert (mode != CCFPmode
18763 || flag_finite_math_only
18764 || (code != LE && code != GE
18765 && code != UNEQ && code != LTGT
18766 && code != UNGT && code != UNLT));
18768 /* These are invalid; the information is not there. */
18769 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18773 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18774 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18775 not zero, store there the bit offset (counted from the right) where
18776 the single stretch of 1 bits begins; and similarly for B, the bit
18777 offset where it ends. */
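/* Worked example: MASK = 0x0ff0 has a single stretch of ones in bits
   4..11, so bit = val & -val = 0x10 gives *E = 4, and val + bit = 0x1000
   gives *B = exact_log2 (0x1000) - 1 = 11; the later branches below
   handle masks whose stretch of ones wraps around the top bit.  */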
18779 bool
18780 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18782 unsigned HOST_WIDE_INT val = INTVAL (mask);
18783 unsigned HOST_WIDE_INT bit;
18784 int nb, ne;
18785 int n = GET_MODE_PRECISION (mode);
18787 if (mode != DImode && mode != SImode)
18788 return false;
18790 if (INTVAL (mask) >= 0)
18792 bit = val & -val;
18793 ne = exact_log2 (bit);
18794 nb = exact_log2 (val + bit);
18796 else if (val + 1 == 0)
18798 nb = n;
18799 ne = 0;
18801 else if (val & 1)
18803 val = ~val;
18804 bit = val & -val;
18805 nb = exact_log2 (bit);
18806 ne = exact_log2 (val + bit);
18808 else
18810 bit = val & -val;
18811 ne = exact_log2 (bit);
18812 if (val + bit == 0)
18813 nb = n;
18814 else
18815 nb = 0;
18818 nb--;
18820 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18821 return false;
18823 if (b)
18824 *b = nb;
18825 if (e)
18826 *e = ne;
18828 return true;
18831 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18832 or rldicr instruction, to implement an AND with it in mode MODE. */
18834 bool
18835 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18837 int nb, ne;
18839 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18840 return false;
18842 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18843 does not wrap. */
18844 if (mode == DImode)
18845 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18847 /* For SImode, rlwinm can do everything. */
18848 if (mode == SImode)
18849 return (nb < 32 && ne < 32);
18851 return false;
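/* Examples for DImode: 0xffffffff00000000 (B = 63) suits rldicr,
   0x00000000ffffffff (E = 0) suits rldicl, and 0x0000000000000ff0
   (B = 11, E = 4) fits the rlwinm case; a wrap-around mask such as
   0xff000000000000ff is rejected here and left to
   rs6000_emit_2insn_and below.  */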
18854 /* Return the instruction template for an AND with mask in mode MODE, with
18855 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18857 const char *
18858 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18860 int nb, ne;
18862 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18863 gcc_unreachable ();
18865 if (mode == DImode && ne == 0)
18867 operands[3] = GEN_INT (63 - nb);
18868 if (dot)
18869 return "rldicl. %0,%1,0,%3";
18870 return "rldicl %0,%1,0,%3";
18873 if (mode == DImode && nb == 63)
18875 operands[3] = GEN_INT (63 - ne);
18876 if (dot)
18877 return "rldicr. %0,%1,0,%3";
18878 return "rldicr %0,%1,0,%3";
18881 if (nb < 32 && ne < 32)
18883 operands[3] = GEN_INT (31 - nb);
18884 operands[4] = GEN_INT (31 - ne);
18885 if (dot)
18886 return "rlwinm. %0,%1,0,%3,%4";
18887 return "rlwinm %0,%1,0,%3,%4";
18890 gcc_unreachable ();
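/* For illustration: MODE == DImode with operands[2] == 0xffffffff
   (B = 31, E = 0) takes the first arm above, operands[3] = 63 - 31 = 32,
   and produces "rldicl %0,%1,0,32", which keeps just the low 32 bits.  */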
18893 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18894 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18895 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18897 bool
18898 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18900 int nb, ne;
18902 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18903 return false;
18905 int n = GET_MODE_PRECISION (mode);
18906 int sh = -1;
18908 if (CONST_INT_P (XEXP (shift, 1)))
18910 sh = INTVAL (XEXP (shift, 1));
18911 if (sh < 0 || sh >= n)
18912 return false;
18915 rtx_code code = GET_CODE (shift);
18917 /* Convert any shift by 0 to a rotate, to simplify below code. */
18918 if (sh == 0)
18919 code = ROTATE;
18921 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18922 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18923 code = ASHIFT;
18924 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18926 code = LSHIFTRT;
18927 sh = n - sh;
18930 /* DImode rotates need rld*. */
18931 if (mode == DImode && code == ROTATE)
18932 return (nb == 63 || ne == 0 || ne == sh);
18934 /* SImode rotates need rlw*. */
18935 if (mode == SImode && code == ROTATE)
18936 return (nb < 32 && ne < 32 && sh < 32);
18938 /* Wrap-around masks are only okay for rotates. */
18939 if (ne > nb)
18940 return false;
18942 /* Variable shifts are only okay for rotates. */
18943 if (sh < 0)
18944 return false;
18946 /* Don't allow ASHIFT if the mask is wrong for that. */
18947 if (code == ASHIFT && ne < sh)
18948 return false;
18950 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18951 if the mask is wrong for that. */
18952 if (nb < 32 && ne < 32 && sh < 32
18953 && !(code == LSHIFTRT && nb >= 32 - sh))
18954 return true;
18956 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18957 if the mask is wrong for that. */
18958 if (code == LSHIFTRT)
18959 sh = 64 - sh;
18960 if (nb == 63 || ne == 0 || ne == sh)
18961 return !(code == LSHIFTRT && nb >= sh);
18963 return false;
18966 /* Return the instruction template for a shift with mask in mode MODE, with
18967 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18969 const char *
18970 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18972 int nb, ne;
18974 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18975 gcc_unreachable ();
18977 if (mode == DImode && ne == 0)
18979 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18980 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18981 operands[3] = GEN_INT (63 - nb);
18982 if (dot)
18983 return "rld%I2cl. %0,%1,%2,%3";
18984 return "rld%I2cl %0,%1,%2,%3";
18987 if (mode == DImode && nb == 63)
18989 operands[3] = GEN_INT (63 - ne);
18990 if (dot)
18991 return "rld%I2cr. %0,%1,%2,%3";
18992 return "rld%I2cr %0,%1,%2,%3";
18995 if (mode == DImode
18996 && GET_CODE (operands[4]) != LSHIFTRT
18997 && CONST_INT_P (operands[2])
18998 && ne == INTVAL (operands[2]))
19000 operands[3] = GEN_INT (63 - nb);
19001 if (dot)
19002 return "rld%I2c. %0,%1,%2,%3";
19003 return "rld%I2c %0,%1,%2,%3";
19006 if (nb < 32 && ne < 32)
19008 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
19009 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
19010 operands[3] = GEN_INT (31 - nb);
19011 operands[4] = GEN_INT (31 - ne);
19012 /* This insn can also be a 64-bit rotate with mask that really makes
19013 it just a shift right (with mask); the %h below are to adjust for
19014 that situation (shift count is >= 32 in that case). */
19015 if (dot)
19016 return "rlw%I2nm. %0,%1,%h2,%3,%4";
19017 return "rlw%I2nm %0,%1,%h2,%3,%4";
19020 gcc_unreachable ();
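/* For illustration: an SImode shift right (LSHIFTRT) by 24 under mask
   0xff (B = 7, E = 0) reaches the rlwinm arm above; operands[2] becomes
   32 - 24 = 8 and the template prints "rlwinm %0,%1,8,24,31", a rotate
   left by 8 that implements the shift-and-mask in one insn.  */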
19023 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
19024 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
19025 ASHIFT, or LSHIFTRT) in mode MODE. */
19027 bool
19028 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
19030 int nb, ne;
19032 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
19033 return false;
19035 int n = GET_MODE_PRECISION (mode);
19037 int sh = INTVAL (XEXP (shift, 1));
19038 if (sh < 0 || sh >= n)
19039 return false;
19041 rtx_code code = GET_CODE (shift);
19043 /* Convert any shift by 0 to a rotate, to simplify below code. */
19044 if (sh == 0)
19045 code = ROTATE;
19047 /* Convert rotate to simple shift if we can, to make analysis simpler. */
19048 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
19049 code = ASHIFT;
19050 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
19052 code = LSHIFTRT;
19053 sh = n - sh;
19056 /* DImode rotates need rldimi. */
19057 if (mode == DImode && code == ROTATE)
19058 return (ne == sh);
19060 /* SImode rotates need rlwimi. */
19061 if (mode == SImode && code == ROTATE)
19062 return (nb < 32 && ne < 32 && sh < 32);
19064 /* Wrap-around masks are only okay for rotates. */
19065 if (ne > nb)
19066 return false;
19068 /* Don't allow ASHIFT if the mask is wrong for that. */
19069 if (code == ASHIFT && ne < sh)
19070 return false;
19072 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
19073 if the mask is wrong for that. */
19074 if (nb < 32 && ne < 32 && sh < 32
19075 && !(code == LSHIFTRT && nb >= 32 - sh))
19076 return true;
19078 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
19079 if the mask is wrong for that. */
19080 if (code == LSHIFTRT)
19081 sh = 64 - sh;
19082 if (ne == sh)
19083 return !(code == LSHIFTRT && nb >= sh);
19085 return false;
19088 /* Return the instruction template for an insert with mask in mode MODE, with
19089 operands OPERANDS. If DOT is true, make it a record-form instruction. */
19091 const char *
19092 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
19094 int nb, ne;
19096 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
19097 gcc_unreachable ();
19099 /* Prefer rldimi because rlwimi is cracked. */
19100 if (TARGET_POWERPC64
19101 && (!dot || mode == DImode)
19102 && GET_CODE (operands[4]) != LSHIFTRT
19103 && ne == INTVAL (operands[2]))
19105 operands[3] = GEN_INT (63 - nb);
19106 if (dot)
19107 return "rldimi. %0,%1,%2,%3";
19108 return "rldimi %0,%1,%2,%3";
19111 if (nb < 32 && ne < 32)
19113 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
19114 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
19115 operands[3] = GEN_INT (31 - nb);
19116 operands[4] = GEN_INT (31 - ne);
19117 if (dot)
19118 return "rlwimi. %0,%1,%2,%3,%4";
19119 return "rlwimi %0,%1,%2,%3,%4";
19122 gcc_unreachable ();
19125 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
19126 using two machine instructions. */
19128 bool
19129 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
19131 /* There are two kinds of AND we can handle with two insns:
19132 1) those we can do with two rl* insn;
19133 2) ori[s];xori[s].
19135 We do not handle that last case yet. */
19137 /* If there is just one stretch of ones, we can do it. */
19138 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
19139 return true;
19141 /* Otherwise, fill in the lowest "hole"; if we can do the result with
19142 one insn, we can do the whole thing with two. */
19143 unsigned HOST_WIDE_INT val = INTVAL (c);
19144 unsigned HOST_WIDE_INT bit1 = val & -val;
19145 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19146 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19147 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19148 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
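/* Worked example: C = 0x00ff0ff0 has its lowest hole at bits 12..15, so
   bit1 = 0x10, bit2 = 0x1000, bit3 = 0x10000, and the candidate
   val + bit3 - bit2 = 0x00fffff0 is one stretch of ones, hence a valid
   rl* mask; rs6000_emit_2insn_and below then performs the AND as two
   rotate-and-mask insns using mask1 = ~0x0000f000 and
   mask2 = 0x00fffff0.  */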
19151 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
19152 If EXPAND is true, split rotate-and-mask instructions we generate to
19153 their constituent parts as well (this is used during expand); if DOT
19154 is 1, make the last insn a record-form instruction clobbering the
19155 destination GPR and setting the CC reg (from operands[3]); if 2, set
19156 that GPR as well as the CC reg. */
19158 void
19159 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
19161 gcc_assert (!(expand && dot));
19163 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
19165 /* If it is one stretch of ones, it is DImode; shift left, mask, then
19166 shift right. This generates better code than doing the masks without
19167 shifts, or shifting first right and then left. */
19168 int nb, ne;
19169 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
19171 gcc_assert (mode == DImode);
19173 int shift = 63 - nb;
19174 if (expand)
19176 rtx tmp1 = gen_reg_rtx (DImode);
19177 rtx tmp2 = gen_reg_rtx (DImode);
19178 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
19179 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
19180 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
19182 else
19184 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
19185 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
19186 emit_move_insn (operands[0], tmp);
19187 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
19188 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19190 return;
19193 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
19194 that does the rest. */
19195 unsigned HOST_WIDE_INT bit1 = val & -val;
19196 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19197 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19198 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19200 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
19201 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
19203 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
19205 /* Two "no-rotate"-and-mask instructions, for SImode. */
19206 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
19208 gcc_assert (mode == SImode);
19210 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19211 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
19212 emit_move_insn (reg, tmp);
19213 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19214 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19215 return;
19218 gcc_assert (mode == DImode);
19220 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
19221 insns; we have to do the first in SImode, because it wraps. */
19222 if (mask2 <= 0xffffffff
19223 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
19225 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19226 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
19227 GEN_INT (mask1));
19228 rtx reg_low = gen_lowpart (SImode, reg);
19229 emit_move_insn (reg_low, tmp);
19230 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19231 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19232 return;
19235 /* Two rld* insns: rotate, clear the hole in the middle (which now is
19236 at the top end), rotate back and clear the other hole. */
19237 int right = exact_log2 (bit3);
19238 int left = 64 - right;
19240 /* Rotate the mask too. */
19241 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
19243 if (expand)
19245 rtx tmp1 = gen_reg_rtx (DImode);
19246 rtx tmp2 = gen_reg_rtx (DImode);
19247 rtx tmp3 = gen_reg_rtx (DImode);
19248 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
19249 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
19250 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
19251 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
19253 else
19255 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
19256 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
19257 emit_move_insn (operands[0], tmp);
19258 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
19259 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
19260 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19264 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
19265 for lfq and stfq insns iff the registers are hard registers. */
19267 int
19268 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
19270 /* We might have been passed a SUBREG. */
19271 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
19272 return 0;
19274 /* We might have been passed non-floating-point registers. */
19275 if (!FP_REGNO_P (REGNO (reg1))
19276 || !FP_REGNO_P (REGNO (reg2)))
19277 return 0;
19279 return (REGNO (reg1) == REGNO (reg2) - 1);
19282 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
19283 addr1 and addr2 must be in consecutive memory locations
19284 (addr2 == addr1 + 8). */
19286 int
19287 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
19289 rtx addr1, addr2;
19290 unsigned int reg1, reg2;
19291 int offset1, offset2;
19293 /* The mems cannot be volatile. */
19294 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
19295 return 0;
19297 addr1 = XEXP (mem1, 0);
19298 addr2 = XEXP (mem2, 0);
19300 /* Extract an offset (if used) from the first addr. */
19301 if (GET_CODE (addr1) == PLUS)
19303 /* If not a REG, return zero. */
19304 if (GET_CODE (XEXP (addr1, 0)) != REG)
19305 return 0;
19306 else
19308 reg1 = REGNO (XEXP (addr1, 0));
19309 /* The offset must be constant! */
19310 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
19311 return 0;
19312 offset1 = INTVAL (XEXP (addr1, 1));
19315 else if (GET_CODE (addr1) != REG)
19316 return 0;
19317 else
19319 reg1 = REGNO (addr1);
19320 /* This was a simple (mem (reg)) expression. Offset is 0. */
19321 offset1 = 0;
19324 /* And now for the second addr. */
19325 if (GET_CODE (addr2) == PLUS)
19327 /* If not a REG, return zero. */
19328 if (GET_CODE (XEXP (addr2, 0)) != REG)
19329 return 0;
19330 else
19332 reg2 = REGNO (XEXP (addr2, 0));
19333 /* The offset must be constant. */
19334 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
19335 return 0;
19336 offset2 = INTVAL (XEXP (addr2, 1));
19339 else if (GET_CODE (addr2) != REG)
19340 return 0;
19341 else
19343 reg2 = REGNO (addr2);
19344 /* This was a simple (mem (reg)) expression. Offset is 0. */
19345 offset2 = 0;
19348 /* Both of these must have the same base register. */
19349 if (reg1 != reg2)
19350 return 0;
19352 /* The offset for the second addr must be 8 more than the first addr. */
19353 if (offset2 != offset1 + 8)
19354 return 0;
19356 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
19357 instructions. */
19358 return 1;
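/* For illustration: MEM1 = (mem (plus r3 8)) and MEM2 = (mem (plus r3 16))
   pass every check above (same base register, second offset 8 larger),
   so the peephole may fuse the two adjacent 8-byte accesses into one
   lfq or stfq.  */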
19361 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
19362 need to use DDmode, in all other cases we can use the same mode. */
19363 static machine_mode
19364 rs6000_secondary_memory_needed_mode (machine_mode mode)
19366 if (lra_in_progress && mode == SDmode)
19367 return DDmode;
19368 return mode;
19371 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19372 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19373 only work on the traditional altivec registers, note if an altivec register
19374 was chosen. */
19376 static enum rs6000_reg_type
19377 register_to_reg_type (rtx reg, bool *is_altivec)
19379 HOST_WIDE_INT regno;
19380 enum reg_class rclass;
19382 if (GET_CODE (reg) == SUBREG)
19383 reg = SUBREG_REG (reg);
19385 if (!REG_P (reg))
19386 return NO_REG_TYPE;
19388 regno = REGNO (reg);
19389 if (regno >= FIRST_PSEUDO_REGISTER)
19391 if (!lra_in_progress && !reload_completed)
19392 return PSEUDO_REG_TYPE;
19394 regno = true_regnum (reg);
19395 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
19396 return PSEUDO_REG_TYPE;
19399 gcc_assert (regno >= 0);
19401 if (is_altivec && ALTIVEC_REGNO_P (regno))
19402 *is_altivec = true;
19404 rclass = rs6000_regno_regclass[regno];
19405 return reg_class_to_reg_type[(int)rclass];
19408 /* Helper function to return the cost of adding a TOC entry address. */
19410 static inline int
19411 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19413 int ret;
19415 if (TARGET_CMODEL != CMODEL_SMALL)
19416 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19418 else
19419 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19421 return ret;
19424 /* Helper function for rs6000_secondary_reload to determine whether the memory
19425 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19426 needs reloading. Return negative if the memory is not handled by the memory
19427 helper functions and a different reload method should be tried, 0 if no
19428 additional instructions are needed, and positive to give the extra cost for the
19429 memory. */
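/* For example, a PRE_INC address reloaded into a class whose addr_mask
   lacks RELOAD_REG_PRE_INCDEC hits the "update" case below and reports
   an extra cost of 1, while an address form the helpers cannot handle at
   all (fail_msg set) reports -1.  */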
19431 static int
19432 rs6000_secondary_reload_memory (rtx addr,
19433 enum reg_class rclass,
19434 machine_mode mode)
19436 int extra_cost = 0;
19437 rtx reg, and_arg, plus_arg0, plus_arg1;
19438 addr_mask_type addr_mask;
19439 const char *type = NULL;
19440 const char *fail_msg = NULL;
19442 if (GPR_REG_CLASS_P (rclass))
19443 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19445 else if (rclass == FLOAT_REGS)
19446 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19448 else if (rclass == ALTIVEC_REGS)
19449 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19451 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19452 else if (rclass == VSX_REGS)
19453 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19454 & ~RELOAD_REG_AND_M16);
19456 /* If the register allocator hasn't made up its mind yet on the register
19457 class to use, settle on defaults to use. */
19458 else if (rclass == NO_REGS)
19460 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19461 & ~RELOAD_REG_AND_M16);
19463 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19464 addr_mask &= ~(RELOAD_REG_INDEXED
19465 | RELOAD_REG_PRE_INCDEC
19466 | RELOAD_REG_PRE_MODIFY);
19469 else
19470 addr_mask = 0;
19472 /* If the register isn't valid in this register class, just return now. */
19473 if ((addr_mask & RELOAD_REG_VALID) == 0)
19475 if (TARGET_DEBUG_ADDR)
19477 fprintf (stderr,
19478 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19479 "not valid in class\n",
19480 GET_MODE_NAME (mode), reg_class_names[rclass]);
19481 debug_rtx (addr);
19484 return -1;
19487 switch (GET_CODE (addr))
19489 /* Does the register class support auto update forms for this mode? We
19490 don't need a scratch register, since the powerpc only supports
19491 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19492 case PRE_INC:
19493 case PRE_DEC:
19494 reg = XEXP (addr, 0);
19495 if (!base_reg_operand (addr, GET_MODE (reg)))
19497 fail_msg = "no base register #1";
19498 extra_cost = -1;
19501 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19503 extra_cost = 1;
19504 type = "update";
19506 break;
19508 case PRE_MODIFY:
19509 reg = XEXP (addr, 0);
19510 plus_arg1 = XEXP (addr, 1);
19511 if (!base_reg_operand (reg, GET_MODE (reg))
19512 || GET_CODE (plus_arg1) != PLUS
19513 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19515 fail_msg = "bad PRE_MODIFY";
19516 extra_cost = -1;
19519 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19521 extra_cost = 1;
19522 type = "update";
19524 break;
19526 /* Do we need to simulate AND -16 to clear the bottom address bits used
19527 in VMX load/stores? Only allow the AND for vector sizes. */
19528 case AND:
19529 and_arg = XEXP (addr, 0);
19530 if (GET_MODE_SIZE (mode) != 16
19531 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19532 || INTVAL (XEXP (addr, 1)) != -16)
19534 fail_msg = "bad Altivec AND #1";
19535 extra_cost = -1;
19538 if (rclass != ALTIVEC_REGS)
19540 if (legitimate_indirect_address_p (and_arg, false))
19541 extra_cost = 1;
19543 else if (legitimate_indexed_address_p (and_arg, false))
19544 extra_cost = 2;
19546 else
19548 fail_msg = "bad Altivec AND #2";
19549 extra_cost = -1;
19552 type = "and";
19554 break;
19556 /* If this is an indirect address, make sure it is a base register. */
19557 case REG:
19558 case SUBREG:
19559 if (!legitimate_indirect_address_p (addr, false))
19561 extra_cost = 1;
19562 type = "move";
19564 break;
19566 /* If this is an indexed address, make sure the register class can handle
19567 indexed addresses for this mode. */
19568 case PLUS:
19569 plus_arg0 = XEXP (addr, 0);
19570 plus_arg1 = XEXP (addr, 1);
19572 /* (plus (plus (reg) (constant)) (constant)) is generated during
19573 push_reload processing, so handle it now. */
19574 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19576 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19578 extra_cost = 1;
19579 type = "offset";
19583 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19584 push_reload processing, so handle it now. */
19585 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19587 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19589 extra_cost = 1;
19590 type = "indexed #2";
19594 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19596 fail_msg = "no base register #2";
19597 extra_cost = -1;
19600 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19602 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19603 || !legitimate_indexed_address_p (addr, false))
19605 extra_cost = 1;
19606 type = "indexed";
19610 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19611 && CONST_INT_P (plus_arg1))
19613 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19615 extra_cost = 1;
19616 type = "vector d-form offset";
19620 /* Make sure the register class can handle offset addresses. */
19621 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19623 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19625 extra_cost = 1;
19626 type = "offset #2";
19630 else
19632 fail_msg = "bad PLUS";
19633 extra_cost = -1;
19636 break;
19638 case LO_SUM:
19639 /* Quad offsets are restricted and can't handle normal addresses. */
19640 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19642 extra_cost = -1;
19643 type = "vector d-form lo_sum";
19646 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19648 fail_msg = "bad LO_SUM";
19649 extra_cost = -1;
19652 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19654 extra_cost = 1;
19655 type = "lo_sum";
19657 break;
19659 /* Static addresses need to create a TOC entry. */
19660 case CONST:
19661 case SYMBOL_REF:
19662 case LABEL_REF:
19663 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19665 extra_cost = -1;
19666 type = "vector d-form lo_sum #2";
19669 else
19671 type = "address";
19672 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19674 break;
19676 /* TOC references look like offsettable memory. */
19677 case UNSPEC:
19678 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19680 fail_msg = "bad UNSPEC";
19681 extra_cost = -1;
19684 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19686 extra_cost = -1;
19687 type = "vector d-form lo_sum #3";
19690 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19692 extra_cost = 1;
19693 type = "toc reference";
19695 break;
19697 default:
19699 fail_msg = "bad address";
19700 extra_cost = -1;
19704 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19706 if (extra_cost < 0)
19707 fprintf (stderr,
19708 "rs6000_secondary_reload_memory error: mode = %s, "
19709 "class = %s, addr_mask = '%s', %s\n",
19710 GET_MODE_NAME (mode),
19711 reg_class_names[rclass],
19712 rs6000_debug_addr_mask (addr_mask, false),
19713 (fail_msg != NULL) ? fail_msg : "<bad address>");
19715 else
19716 fprintf (stderr,
19717 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19718 "addr_mask = '%s', extra cost = %d, %s\n",
19719 GET_MODE_NAME (mode),
19720 reg_class_names[rclass],
19721 rs6000_debug_addr_mask (addr_mask, false),
19722 extra_cost,
19723 (type) ? type : "<none>");
19725 debug_rtx (addr);
19728 return extra_cost;
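/* Illustrative example (editorial addition, not in the original source):
   reloading (mem:V4SI (and (plus r3 r4) (const_int -16))) for VSX_REGS
   takes the AND case above.  The expression under the AND is a valid
   indexed address, so the AND can be simulated and the function returns
   an extra cost of 2 instead of rejecting the address.  */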
19731 /* Helper function for rs6000_secondary_reload to return true if a move to a
19732 different register class is really a simple move. */
19734 static bool
19735 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19736 enum rs6000_reg_type from_type,
19737 machine_mode mode)
19739 int size = GET_MODE_SIZE (mode);
19741 /* Add support for various direct moves available. In this function, we only
19742 look at cases where we don't need any extra registers, and one or more
19743 simple move insns are issued. Originally small integers are not allowed
19744 in FPR/VSX registers. Single precision binary floating is not a simple
19745 move because we need to convert to the single precision memory layout.
19746 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19747 need special direct move handling, which we do not support yet. */
19748 if (TARGET_DIRECT_MOVE
19749 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19750 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19752 if (TARGET_POWERPC64)
19754 /* ISA 2.07: MTVSRD or MFVSRD. */
19755 if (size == 8)
19756 return true;
19758 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19759 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19760 return true;
19763 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19764 if (TARGET_P8_VECTOR)
19766 if (mode == SImode)
19767 return true;
19769 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19770 return true;
19773 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19774 if (mode == SDmode)
19775 return true;
19778 /* Power6+: MFTGPR or MFFGPR. */
19779 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19780 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19781 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19782 return true;
19784 /* Move to/from SPR. */
19785 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19786 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19787 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19788 return true;
19790 return false;
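/* Editorial note (illustrative, not from the original source): on a
   64-bit ISA 2.07 (power8) target, a DImode move between a GPR and a
   VSX register is a single mtvsrd/mfvsrd and is therefore "simple",
   while a TImode move needs the xxpermdi-based two-step sequences
   handled by rs6000_secondary_reload_direct_move below.  */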
19793 /* Direct move helper function for rs6000_secondary_reload. Handle all of the
19794 special direct moves that involve allocating an extra register. Return true
19795 and fill in SRI's insn code and extra cost if there is such a move, or false
19796 if not. */
19798 static bool
19799 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19800 enum rs6000_reg_type from_type,
19801 machine_mode mode,
19802 secondary_reload_info *sri,
19803 bool altivec_p)
19805 bool ret = false;
19806 enum insn_code icode = CODE_FOR_nothing;
19807 int cost = 0;
19808 int size = GET_MODE_SIZE (mode);
19810 if (TARGET_POWERPC64 && size == 16)
19812 /* Handle moving 128-bit values from GPRs to VSX registers on
19813 ISA 2.07 (power8, power9) when running in 64-bit mode using
19814 XXPERMDI to glue the two 64-bit values back together. */
19815 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19817 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19818 icode = reg_addr[mode].reload_vsx_gpr;
19821 /* Handle moving 128-bit values from VSX registers to GPRs on
19822 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19823 bottom 64-bit value. */
19824 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19826 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19827 icode = reg_addr[mode].reload_gpr_vsx;
19831 else if (TARGET_POWERPC64 && mode == SFmode)
19833 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19835 cost = 3; /* xscvdpspn, mfvsrd, and. */
19836 icode = reg_addr[mode].reload_gpr_vsx;
19839 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19841 cost = 2; /* mtvsrz, xscvspdpn. */
19842 icode = reg_addr[mode].reload_vsx_gpr;
19846 else if (!TARGET_POWERPC64 && size == 8)
19848 /* Handle moving 64-bit values from GPRs to floating point registers on
19849 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19850 32-bit values back together. Altivec register classes must be handled
19851 specially since a different instruction is used, and the secondary
19852 reload support requires a single register class in the scratch
19853 register constraint. However, right now TFmode is not allowed in
19854 Altivec registers, so the pattern will never match. */
19855 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19857 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19858 icode = reg_addr[mode].reload_fpr_gpr;
19862 if (icode != CODE_FOR_nothing)
19864 ret = true;
19865 if (sri)
19867 sri->icode = icode;
19868 sri->extra_cost = cost;
19872 return ret;
19875 /* Return whether a move between two register classes can be done either
19876 directly (simple move) or via a pattern that uses a single extra temporary
19877 (using ISA 2.07's direct move in this case). */
19879 static bool
19880 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19881 enum rs6000_reg_type from_type,
19882 machine_mode mode,
19883 secondary_reload_info *sri,
19884 bool altivec_p)
19886 /* Fall back to load/store reloads if either type is not a register. */
19887 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19888 return false;
19890 /* If we haven't allocated registers yet, assume the move can be done for the
19891 standard register types. */
19892 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19893 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19894 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19895 return true;
19897 /* A move within the same set of registers is a simple move for non-specialized
19898 registers. */
19899 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19900 return true;
19902 /* Check whether a simple move can be done directly. */
19903 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19905 if (sri)
19907 sri->icode = CODE_FOR_nothing;
19908 sri->extra_cost = 0;
19910 return true;
19913 /* Now check if we can do it in a few steps. */
19914 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19915 altivec_p);
19918 /* Inform reload about cases where moving X with a mode MODE to a register in
19919 RCLASS requires an extra scratch or immediate register. Return the class
19920 needed for the immediate register.
19922 For VSX and Altivec, we may need a register to convert sp+offset into
19923 reg+sp.
19925 For misaligned 64-bit gpr loads and stores we need a register to
19926 convert an offset address to indirect. */
19928 static reg_class_t
19929 rs6000_secondary_reload (bool in_p,
19930 rtx x,
19931 reg_class_t rclass_i,
19932 machine_mode mode,
19933 secondary_reload_info *sri)
19935 enum reg_class rclass = (enum reg_class) rclass_i;
19936 reg_class_t ret = ALL_REGS;
19937 enum insn_code icode;
19938 bool default_p = false;
19939 bool done_p = false;
19941 /* Allow subreg of memory before/during reload. */
19942 bool memory_p = (MEM_P (x)
19943 || (!reload_completed && GET_CODE (x) == SUBREG
19944 && MEM_P (SUBREG_REG (x))));
19946 sri->icode = CODE_FOR_nothing;
19947 sri->t_icode = CODE_FOR_nothing;
19948 sri->extra_cost = 0;
19949 icode = ((in_p)
19950 ? reg_addr[mode].reload_load
19951 : reg_addr[mode].reload_store);
19953 if (REG_P (x) || register_operand (x, mode))
19955 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19956 bool altivec_p = (rclass == ALTIVEC_REGS);
19957 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19959 if (!in_p)
19960 std::swap (to_type, from_type);
19962 /* Can we do a direct move of some sort? */
19963 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19964 altivec_p))
19966 icode = (enum insn_code)sri->icode;
19967 default_p = false;
19968 done_p = true;
19969 ret = NO_REGS;
19973 /* Make sure 0.0 is not reloaded or forced into memory. */
19974 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19976 ret = NO_REGS;
19977 default_p = false;
19978 done_p = true;
19981 /* If this is a scalar floating point value and we want to load it into the
19982 traditional Altivec registers, move it via a traditional floating
19983 point register, unless we have D-form addressing. Also make sure that
19984 non-zero constants use an FPR. */
19985 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19986 && !mode_supports_vmx_dform (mode)
19987 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19988 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19990 ret = FLOAT_REGS;
19991 default_p = false;
19992 done_p = true;
19995 /* Handle reloads of loads/stores if we have reload helper functions. */
19996 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19998 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19999 mode);
20001 if (extra_cost >= 0)
20003 done_p = true;
20004 ret = NO_REGS;
20005 if (extra_cost > 0)
20007 sri->extra_cost = extra_cost;
20008 sri->icode = icode;
20013 /* Handle unaligned loads and stores of integer registers. */
20014 if (!done_p && TARGET_POWERPC64
20015 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
20016 && memory_p
20017 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
20019 rtx addr = XEXP (x, 0);
20020 rtx off = address_offset (addr);
20022 if (off != NULL_RTX)
20024 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
20025 unsigned HOST_WIDE_INT offset = INTVAL (off);
20027 /* We need a secondary reload when our legitimate_address_p
20028 says the address is good (as otherwise the entire address
20029 will be reloaded), and the offset is not a multiple of
20030 four or we have an address wrap. Address wrap will only
20031 occur for LO_SUMs since legitimate_offset_address_p
20032 rejects addresses for 16-byte mems that will wrap. */
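/* Worked example (editorial addition, not in the original source): for a
   16-byte access, extra = 8.  A LO_SUM offset of 0x7ffc gives
   ((0x7ffc & 0xffff) ^ 0x8000) = 0xfffc >= 0x10000 - 8 = 0xfff8, so the
   second word would wrap and the reload helper is needed, even though
   the offset itself is a multiple of four.  */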
20033 if (GET_CODE (addr) == LO_SUM
20034 ? (1 /* legitimate_address_p allows any offset for lo_sum */
20035 && ((offset & 3) != 0
20036 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
20037 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
20038 && (offset & 3) != 0))
20040 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
20041 if (in_p)
20042 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
20043 : CODE_FOR_reload_di_load);
20044 else
20045 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
20046 : CODE_FOR_reload_di_store);
20047 sri->extra_cost = 2;
20048 ret = NO_REGS;
20049 done_p = true;
20051 else
20052 default_p = true;
20054 else
20055 default_p = true;
20058 if (!done_p && !TARGET_POWERPC64
20059 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
20060 && memory_p
20061 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
20063 rtx addr = XEXP (x, 0);
20064 rtx off = address_offset (addr);
20066 if (off != NULL_RTX)
20068 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
20069 unsigned HOST_WIDE_INT offset = INTVAL (off);
20071 /* We need a secondary reload when our legitimate_address_p
20072 says the address is good (as otherwise the entire address
20073 will be reloaded), and we have a wrap.
20075 legitimate_lo_sum_address_p allows LO_SUM addresses to
20076 have any offset so test for wrap in the low 16 bits.
20078 legitimate_offset_address_p checks for the range
20079 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
20080 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
20081 [0x7ff4,0x7fff] respectively, so test for the
20082 intersection of these ranges, [0x7ffc,0x7fff] and
20083 [0x7ff4,0x7ff7] respectively.
20085 Note that the address we see here may have been
20086 manipulated by legitimize_reload_address. */
20087 if (GET_CODE (addr) == LO_SUM
20088 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
20089 : offset - (0x8000 - extra) < UNITS_PER_WORD)
20091 if (in_p)
20092 sri->icode = CODE_FOR_reload_si_load;
20093 else
20094 sri->icode = CODE_FOR_reload_si_store;
20095 sri->extra_cost = 2;
20096 ret = NO_REGS;
20097 done_p = true;
20099 else
20100 default_p = true;
20102 else
20103 default_p = true;
20106 if (!done_p)
20107 default_p = true;
20109 if (default_p)
20110 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
20112 gcc_assert (ret != ALL_REGS);
20114 if (TARGET_DEBUG_ADDR)
20116 fprintf (stderr,
20117 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
20118 "mode = %s",
20119 reg_class_names[ret],
20120 in_p ? "true" : "false",
20121 reg_class_names[rclass],
20122 GET_MODE_NAME (mode));
20124 if (reload_completed)
20125 fputs (", after reload", stderr);
20127 if (!done_p)
20128 fputs (", done_p not set", stderr);
20130 if (default_p)
20131 fputs (", default secondary reload", stderr);
20133 if (sri->icode != CODE_FOR_nothing)
20134 fprintf (stderr, ", reload func = %s, extra cost = %d",
20135 insn_data[sri->icode].name, sri->extra_cost);
20137 else if (sri->extra_cost > 0)
20138 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
20140 fputs ("\n", stderr);
20141 debug_rtx (x);
20144 return ret;
20147 /* Better tracing for rs6000_secondary_reload_inner. */
20149 static void
20150 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
20151 bool store_p)
20153 rtx set, clobber;
20155 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
20157 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
20158 store_p ? "store" : "load");
20160 if (store_p)
20161 set = gen_rtx_SET (mem, reg);
20162 else
20163 set = gen_rtx_SET (reg, mem);
20165 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
20166 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
20169 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
20170 ATTRIBUTE_NORETURN;
20172 static void
20173 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
20174 bool store_p)
20176 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
20177 gcc_unreachable ();
20180 /* Fix up reload addresses for values in GPR, FPR, and VMX registers that have
20181 reload helper functions. These were identified in
20182 rs6000_secondary_reload_memory, and if reload decided to use the secondary
20183 reload, it calls the insns:
20184 reload_<RELOAD:mode>_<P:mptrsize>_store
20185 reload_<RELOAD:mode>_<P:mptrsize>_load
20187 which in turn call this function, to do whatever is necessary to create
20188 valid addresses. */
20190 void
20191 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
20193 int regno = true_regnum (reg);
20194 machine_mode mode = GET_MODE (reg);
20195 addr_mask_type addr_mask;
20196 rtx addr;
20197 rtx new_addr;
20198 rtx op_reg, op0, op1;
20199 rtx and_op;
20200 rtx cc_clobber;
20201 rtvec rv;
20203 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
20204 || !base_reg_operand (scratch, GET_MODE (scratch)))
20205 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20207 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
20208 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
20210 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
20211 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
20213 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
20214 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
20216 else
20217 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20219 /* Make sure the mode is valid in this register class. */
20220 if ((addr_mask & RELOAD_REG_VALID) == 0)
20221 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20223 if (TARGET_DEBUG_ADDR)
20224 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
20226 new_addr = addr = XEXP (mem, 0);
20227 switch (GET_CODE (addr))
20229 /* Does the register class support auto update forms for this mode? If
20230 not, do the update now. We don't need a scratch register, since the
20231 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
20232 case PRE_INC:
20233 case PRE_DEC:
20234 op_reg = XEXP (addr, 0);
20235 if (!base_reg_operand (op_reg, Pmode))
20236 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20238 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
20240 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
20241 new_addr = op_reg;
20243 break;
20245 case PRE_MODIFY:
20246 op0 = XEXP (addr, 0);
20247 op1 = XEXP (addr, 1);
20248 if (!base_reg_operand (op0, Pmode)
20249 || GET_CODE (op1) != PLUS
20250 || !rtx_equal_p (op0, XEXP (op1, 0)))
20251 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20253 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
20255 emit_insn (gen_rtx_SET (op0, op1));
20256 new_addr = op0;
20258 break;
20260 /* Do we need to simulate AND -16 to clear the bottom address bits used
20261 in VMX load/stores? */
20262 case AND:
20263 op0 = XEXP (addr, 0);
20264 op1 = XEXP (addr, 1);
20265 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
20267 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
20268 op_reg = op0;
20270 else if (GET_CODE (op1) == PLUS)
20272 emit_insn (gen_rtx_SET (scratch, op1));
20273 op_reg = scratch;
20276 else
20277 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20279 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
20280 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
20281 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
20282 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
20283 new_addr = scratch;
20285 break;
20287 /* If this is an indirect address, make sure it is a base register. */
20288 case REG:
20289 case SUBREG:
20290 if (!base_reg_operand (addr, GET_MODE (addr)))
20292 emit_insn (gen_rtx_SET (scratch, addr));
20293 new_addr = scratch;
20295 break;
20297 /* If this is an indexed address, make sure the register class can handle
20298 indexed addresses for this mode. */
20299 case PLUS:
20300 op0 = XEXP (addr, 0);
20301 op1 = XEXP (addr, 1);
20302 if (!base_reg_operand (op0, Pmode))
20303 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20305 else if (int_reg_operand (op1, Pmode))
20307 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20309 emit_insn (gen_rtx_SET (scratch, addr));
20310 new_addr = scratch;
20314 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
20316 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
20317 || !quad_address_p (addr, mode, false))
20319 emit_insn (gen_rtx_SET (scratch, addr));
20320 new_addr = scratch;
20324 /* Make sure the register class can handle offset addresses. */
20325 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
20327 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20329 emit_insn (gen_rtx_SET (scratch, addr));
20330 new_addr = scratch;
20334 else
20335 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20337 break;
20339 case LO_SUM:
20340 op0 = XEXP (addr, 0);
20341 op1 = XEXP (addr, 1);
20342 if (!base_reg_operand (op0, Pmode))
20343 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20345 else if (int_reg_operand (op1, Pmode))
20347 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20349 emit_insn (gen_rtx_SET (scratch, addr));
20350 new_addr = scratch;
20354 /* Quad offsets are restricted and can't handle normal addresses. */
20355 else if (mode_supports_vsx_dform_quad (mode))
20357 emit_insn (gen_rtx_SET (scratch, addr));
20358 new_addr = scratch;
20361 /* Make sure the register class can handle offset addresses. */
20362 else if (legitimate_lo_sum_address_p (mode, addr, false))
20364 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20366 emit_insn (gen_rtx_SET (scratch, addr));
20367 new_addr = scratch;
20371 else
20372 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20374 break;
20376 case SYMBOL_REF:
20377 case CONST:
20378 case LABEL_REF:
20379 rs6000_emit_move (scratch, addr, Pmode);
20380 new_addr = scratch;
20381 break;
20383 default:
20384 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20387 /* Adjust the address if it changed. */
20388 if (addr != new_addr)
20390 mem = replace_equiv_address_nv (mem, new_addr);
20391 if (TARGET_DEBUG_ADDR)
20392 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20395 /* Now create the move. */
20396 if (store_p)
20397 emit_insn (gen_rtx_SET (mem, reg));
20398 else
20399 emit_insn (gen_rtx_SET (reg, mem));
20401 return;
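/* Editorial illustration (not in the original source): when the AND case
   above must be simulated, the emitted RTL is roughly
     (parallel [(set scratch (and base (const_int -16)))
                (clobber (scratch:CC))])
   after which the memory reference is rewritten to address through
   SCRATCH.  */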
20404 /* Convert reloads involving 64-bit gprs and misaligned offset
20405 addressing, or multiple 32-bit gprs and offsets that are too large,
20406 to use indirect addressing. */
20408 void
20409 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20411 int regno = true_regnum (reg);
20412 enum reg_class rclass;
20413 rtx addr;
20414 rtx scratch_or_premodify = scratch;
20416 if (TARGET_DEBUG_ADDR)
20418 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20419 store_p ? "store" : "load");
20420 fprintf (stderr, "reg:\n");
20421 debug_rtx (reg);
20422 fprintf (stderr, "mem:\n");
20423 debug_rtx (mem);
20424 fprintf (stderr, "scratch:\n");
20425 debug_rtx (scratch);
20428 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
20429 gcc_assert (GET_CODE (mem) == MEM);
20430 rclass = REGNO_REG_CLASS (regno);
20431 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20432 addr = XEXP (mem, 0);
20434 if (GET_CODE (addr) == PRE_MODIFY)
20436 gcc_assert (REG_P (XEXP (addr, 0))
20437 && GET_CODE (XEXP (addr, 1)) == PLUS
20438 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20439 scratch_or_premodify = XEXP (addr, 0);
20440 if (!HARD_REGISTER_P (scratch_or_premodify))
20441 /* If we have a pseudo here then reload will have arranged
20442 to have it replaced, but only in the original insn.
20443 Use the replacement here too. */
20444 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
20446 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
20447 expressions from the original insn, without unsharing them.
20448 Any RTL that points into the original insn will of course
20449 have register replacements applied. That is why we don't
20450 need to look for replacements under the PLUS. */
20451 addr = XEXP (addr, 1);
20453 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20455 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20457 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20459 /* Now create the move. */
20460 if (store_p)
20461 emit_insn (gen_rtx_SET (mem, reg));
20462 else
20463 emit_insn (gen_rtx_SET (reg, mem));
20465 return;
20468 /* Given an rtx X being reloaded into a reg required to be
20469 in class CLASS, return the class of reg to actually use.
20470 In general this is just CLASS; but on some machines
20471 in some cases it is preferable to use a more restrictive class.
20473 On the RS/6000, we have to return NO_REGS when we want to reload a
20474 floating-point CONST_DOUBLE to force it to be copied to memory.
20476 We also don't want to reload integer values into floating-point
20477 registers if we can at all help it. In fact, this can
20478 cause reload to die, if it tries to generate a reload of CTR
20479 into a FP register and discovers it doesn't have the memory location
20480 required.
20482 ??? Would it be a good idea to have reload do the converse, that is
20483 try to reload floating modes into FP registers if possible?
20486 static enum reg_class
20487 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20489 machine_mode mode = GET_MODE (x);
20490 bool is_constant = CONSTANT_P (x);
20492 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20493 reload class for it. */
20494 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20495 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20496 return NO_REGS;
20498 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20499 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20500 return NO_REGS;
20502 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20503 the reloading of address expressions using PLUS into floating point
20504 registers. */
20505 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20507 if (is_constant)
20509 /* Zero is always allowed in all VSX registers. */
20510 if (x == CONST0_RTX (mode))
20511 return rclass;
20513 /* If this is a vector constant that can be formed with a few Altivec
20514 instructions, we want altivec registers. */
20515 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20516 return ALTIVEC_REGS;
20518 /* If this is an integer constant that can easily be loaded into
20519 vector registers, allow it. */
20520 if (CONST_INT_P (x))
20522 HOST_WIDE_INT value = INTVAL (x);
20524 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20525 2.06 can generate it in the Altivec registers with
20526 VSPLTI<x>. */
20527 if (value == -1)
20529 if (TARGET_P8_VECTOR)
20530 return rclass;
20531 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20532 return ALTIVEC_REGS;
20533 else
20534 return NO_REGS;
20537 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20538 a sign extend in the Altivec registers. */
20539 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20540 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20541 return ALTIVEC_REGS;
20544 /* Force constant to memory. */
20545 return NO_REGS;
20548 /* D-form addressing can easily reload the value. */
20549 if (mode_supports_vmx_dform (mode)
20550 || mode_supports_vsx_dform_quad (mode))
20551 return rclass;
20553 /* If this is a scalar floating point value and we don't have D-form
20554 addressing, prefer the traditional floating point registers so that we
20555 can use D-form (register+offset) addressing. */
20556 if (rclass == VSX_REGS
20557 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20558 return FLOAT_REGS;
20560 /* Prefer the Altivec registers if Altivec is handling the vector
20561 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20562 loads. */
20563 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20564 || mode == V1TImode)
20565 return ALTIVEC_REGS;
20567 return rclass;
20570 if (is_constant || GET_CODE (x) == PLUS)
20572 if (reg_class_subset_p (GENERAL_REGS, rclass))
20573 return GENERAL_REGS;
20574 if (reg_class_subset_p (BASE_REGS, rclass))
20575 return BASE_REGS;
20576 return NO_REGS;
20579 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20580 return GENERAL_REGS;
20582 return rclass;
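/* Editorial illustration (not in the original source): asked to reload
   the integer constant 5 into a VSX_REGS pseudo, the code above returns
   ALTIVEC_REGS on an ISA 3.0 target (XXSPLTIB plus a sign extend can
   form it) and NO_REGS otherwise, forcing the constant to memory.  */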
20585 /* Debug version of rs6000_preferred_reload_class. */
20586 static enum reg_class
20587 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20589 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20591 fprintf (stderr,
20592 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20593 "mode = %s, x:\n",
20594 reg_class_names[ret], reg_class_names[rclass],
20595 GET_MODE_NAME (GET_MODE (x)));
20596 debug_rtx (x);
20598 return ret;
20601 /* If we are copying between FP or AltiVec registers and anything else, we need
20602 a memory location. The exception is when we are targeting ppc64 and the
20603 instructions to move between fpr and gpr are available. Also, under VSX, you
20604 can copy vector registers from the FP register set to the Altivec register
20605 set and vice versa. */
20607 static bool
20608 rs6000_secondary_memory_needed (machine_mode mode,
20609 reg_class_t from_class,
20610 reg_class_t to_class)
20612 enum rs6000_reg_type from_type, to_type;
20613 bool altivec_p = ((from_class == ALTIVEC_REGS)
20614 || (to_class == ALTIVEC_REGS));
20616 /* If a simple/direct move is available, we don't need secondary memory. */
20617 from_type = reg_class_to_reg_type[(int)from_class];
20618 to_type = reg_class_to_reg_type[(int)to_class];
20620 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20621 (secondary_reload_info *)0, altivec_p))
20622 return false;
20624 /* If we have a floating point or vector register class, we need to use
20625 memory to transfer the data. */
20626 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20627 return true;
20629 return false;
20632 /* Debug version of rs6000_secondary_memory_needed. */
20633 static bool
20634 rs6000_debug_secondary_memory_needed (machine_mode mode,
20635 reg_class_t from_class,
20636 reg_class_t to_class)
20638 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20640 fprintf (stderr,
20641 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20642 "to_class = %s, mode = %s\n",
20643 ret ? "true" : "false",
20644 reg_class_names[from_class],
20645 reg_class_names[to_class],
20646 GET_MODE_NAME (mode));
20648 return ret;
20651 /* Return the register class of a scratch register needed to copy IN into
20652 or out of a register in RCLASS in MODE. If it can be done directly,
20653 NO_REGS is returned. */
20655 static enum reg_class
20656 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20657 rtx in)
20659 int regno;
20661 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20662 #if TARGET_MACHO
20663 && MACHOPIC_INDIRECT
20664 #endif
20667 /* We cannot copy a symbolic operand directly into anything
20668 other than BASE_REGS for TARGET_ELF. So indicate that a
20669 register from BASE_REGS is needed as an intermediate
20670 register.
20672 On Darwin, pic addresses require a load from memory, which
20673 needs a base register. */
20674 if (rclass != BASE_REGS
20675 && (GET_CODE (in) == SYMBOL_REF
20676 || GET_CODE (in) == HIGH
20677 || GET_CODE (in) == LABEL_REF
20678 || GET_CODE (in) == CONST))
20679 return BASE_REGS;
20682 if (GET_CODE (in) == REG)
20684 regno = REGNO (in);
20685 if (regno >= FIRST_PSEUDO_REGISTER)
20687 regno = true_regnum (in);
20688 if (regno >= FIRST_PSEUDO_REGISTER)
20689 regno = -1;
20692 else if (GET_CODE (in) == SUBREG)
20694 regno = true_regnum (in);
20695 if (regno >= FIRST_PSEUDO_REGISTER)
20696 regno = -1;
20698 else
20699 regno = -1;
20701 /* If we have VSX register moves, prefer moving scalar values between
20702 Altivec registers and GPRs by going via an FPR (and then via memory)
20703 instead of reloading the secondary memory address for Altivec moves. */
20704 if (TARGET_VSX
20705 && GET_MODE_SIZE (mode) < 16
20706 && !mode_supports_vmx_dform (mode)
20707 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20708 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20709 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20710 && (regno >= 0 && INT_REGNO_P (regno)))))
20711 return FLOAT_REGS;
20713 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20714 into anything. */
20715 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20716 || (regno >= 0 && INT_REGNO_P (regno)))
20717 return NO_REGS;
20719 /* Constants, memory, and VSX registers can go into VSX registers (both the
20720 traditional floating point and the altivec registers). */
20721 if (rclass == VSX_REGS
20722 && (regno == -1 || VSX_REGNO_P (regno)))
20723 return NO_REGS;
20725 /* Constants, memory, and FP registers can go into FP registers. */
20726 if ((regno == -1 || FP_REGNO_P (regno))
20727 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20728 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20730 /* Memory and AltiVec registers can go into AltiVec registers. */
20731 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20732 && rclass == ALTIVEC_REGS)
20733 return NO_REGS;
20735 /* We can copy among the CR registers. */
20736 if ((rclass == CR_REGS || rclass == CR0_REGS)
20737 && regno >= 0 && CR_REGNO_P (regno))
20738 return NO_REGS;
20740 /* Otherwise, we need GENERAL_REGS. */
20741 return GENERAL_REGS;
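/* Editorial illustration (not in the original source): copying an SFmode
   value held in an Altivec register to a GPR on a target without D-form
   vector addressing hits the TARGET_VSX test above and returns
   FLOAT_REGS, so the value is staged through an FPR rather than through
   a secondary memory location.  */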
20744 /* Debug version of rs6000_secondary_reload_class. */
20745 static enum reg_class
20746 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20747 machine_mode mode, rtx in)
20749 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20750 fprintf (stderr,
20751 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20752 "mode = %s, input rtx:\n",
20753 reg_class_names[ret], reg_class_names[rclass],
20754 GET_MODE_NAME (mode));
20755 debug_rtx (in);
20757 return ret;
20760 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20762 static bool
20763 rs6000_can_change_mode_class (machine_mode from,
20764 machine_mode to,
20765 reg_class_t rclass)
20767 unsigned from_size = GET_MODE_SIZE (from);
20768 unsigned to_size = GET_MODE_SIZE (to);
20770 if (from_size != to_size)
20772 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20774 if (reg_classes_intersect_p (xclass, rclass))
20776 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20777 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20778 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20779 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20781 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20782 single register under VSX because the scalar part of the register
20783 is in the upper 64-bits, and not the lower 64-bits. Types like
20784 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20785 IEEE floating point can't overlap, and neither can small
20786 values. */
20788 if (to_float128_vector_p && from_float128_vector_p)
20789 return true;
20791 else if (to_float128_vector_p || from_float128_vector_p)
20792 return false;
20794 /* TDmode in floating-point registers must always go into a register
20795 pair with the most significant word in the even-numbered register
20796 to match ISA requirements. In little-endian mode, this does not
20797 match subreg numbering, so we cannot allow subregs. */
20798 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20799 return false;
20801 if (from_size < 8 || to_size < 8)
20802 return false;
20804 if (from_size == 8 && (8 * to_nregs) != to_size)
20805 return false;
20807 if (to_size == 8 && (8 * from_nregs) != from_size)
20808 return false;
20810 return true;
20812 else
20813 return true;
20816 /* Since the VSX register set includes traditional floating point registers
20817 and altivec registers, just check for the size being different instead of
20818 trying to check whether the modes are vector modes. Otherwise it won't
20819 allow, say, DF and DI to change classes. For types like TFmode and TDmode
20820 that take two 64-bit registers, rather than a single 128-bit register, don't
20821 allow subregs of those types to other 128-bit types. */
20822 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20824 unsigned num_regs = (from_size + 15) / 16;
20825 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20826 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20827 return false;
20829 return (from_size == 8 || from_size == 16);
20832 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20833 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20834 return false;
20836 return true;
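/* Editorial illustration (not in the original source): a subreg between
   DImode and IBM-format TFmode is allowed, since TFmode takes two 64-bit
   scalar registers; a subreg between DImode and KFmode is rejected
   because the IEEE 128-bit value occupies a single register with the
   scalar part in the upper 64 bits.  */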
20839 /* Debug version of rs6000_can_change_mode_class. */
20840 static bool
20841 rs6000_debug_can_change_mode_class (machine_mode from,
20842 machine_mode to,
20843 reg_class_t rclass)
20845 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20847 fprintf (stderr,
20848 "rs6000_can_change_mode_class, return %s, from = %s, "
20849 "to = %s, rclass = %s\n",
20850 ret ? "true" : "false",
20851 GET_MODE_NAME (from), GET_MODE_NAME (to),
20852 reg_class_names[rclass]);
20854 return ret;
20857 /* Return a string to do a move operation of 128 bits of data. */
20859 const char *
20860 rs6000_output_move_128bit (rtx operands[])
20862 rtx dest = operands[0];
20863 rtx src = operands[1];
20864 machine_mode mode = GET_MODE (dest);
20865 int dest_regno;
20866 int src_regno;
20867 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20868 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20870 if (REG_P (dest))
20872 dest_regno = REGNO (dest);
20873 dest_gpr_p = INT_REGNO_P (dest_regno);
20874 dest_fp_p = FP_REGNO_P (dest_regno);
20875 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20876 dest_vsx_p = dest_fp_p | dest_vmx_p;
20878 else
20880 dest_regno = -1;
20881 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20884 if (REG_P (src))
20886 src_regno = REGNO (src);
20887 src_gpr_p = INT_REGNO_P (src_regno);
20888 src_fp_p = FP_REGNO_P (src_regno);
20889 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20890 src_vsx_p = src_fp_p | src_vmx_p;
20892 else
20894 src_regno = -1;
20895 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20898 /* Register moves. */
20899 if (dest_regno >= 0 && src_regno >= 0)
20901 if (dest_gpr_p)
20903 if (src_gpr_p)
20904 return "#";
20906 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20907 return (WORDS_BIG_ENDIAN
20908 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20909 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20911 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20912 return "#";
20915 else if (TARGET_VSX && dest_vsx_p)
20917 if (src_vsx_p)
20918 return "xxlor %x0,%x1,%x1";
20920 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20921 return (WORDS_BIG_ENDIAN
20922 ? "mtvsrdd %x0,%1,%L1"
20923 : "mtvsrdd %x0,%L1,%1");
20925 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20926 return "#";
20929 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20930 return "vor %0,%1,%1";
20932 else if (dest_fp_p && src_fp_p)
20933 return "#";
20936 /* Loads. */
20937 else if (dest_regno >= 0 && MEM_P (src))
20939 if (dest_gpr_p)
20941 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20942 return "lq %0,%1";
20943 else
20944 return "#";
20947 else if (TARGET_ALTIVEC && dest_vmx_p
20948 && altivec_indexed_or_indirect_operand (src, mode))
20949 return "lvx %0,%y1";
20951 else if (TARGET_VSX && dest_vsx_p)
20953 if (mode_supports_vsx_dform_quad (mode)
20954 && quad_address_p (XEXP (src, 0), mode, true))
20955 return "lxv %x0,%1";
20957 else if (TARGET_P9_VECTOR)
20958 return "lxvx %x0,%y1";
20960 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20961 return "lxvw4x %x0,%y1";
20963 else
20964 return "lxvd2x %x0,%y1";
20967 else if (TARGET_ALTIVEC && dest_vmx_p)
20968 return "lvx %0,%y1";
20970 else if (dest_fp_p)
20971 return "#";
20974 /* Stores. */
20975 else if (src_regno >= 0 && MEM_P (dest))
20977 if (src_gpr_p)
20979 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20980 return "stq %1,%0";
20981 else
20982 return "#";
20985 else if (TARGET_ALTIVEC && src_vmx_p
20986 && altivec_indexed_or_indirect_operand (dest, mode))
20987 return "stvx %1,%y0";
20989 else if (TARGET_VSX && src_vsx_p)
20991 if (mode_supports_vsx_dform_quad (mode)
20992 && quad_address_p (XEXP (dest, 0), mode, true))
20993 return "stxv %x1,%0";
20995 else if (TARGET_P9_VECTOR)
20996 return "stxvx %x1,%y0";
20998 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20999 return "stxvw4x %x1,%y0";
21001 else
21002 return "stxvd2x %x1,%y0";
21005 else if (TARGET_ALTIVEC && src_vmx_p)
21006 return "stvx %1,%y0";
21008 else if (src_fp_p)
21009 return "#";
21012 /* Constants. */
21013 else if (dest_regno >= 0
21014 && (GET_CODE (src) == CONST_INT
21015 || GET_CODE (src) == CONST_WIDE_INT
21016 || GET_CODE (src) == CONST_DOUBLE
21017 || GET_CODE (src) == CONST_VECTOR))
21019 if (dest_gpr_p)
21020 return "#";
21022 else if ((dest_vmx_p && TARGET_ALTIVEC)
21023 || (dest_vsx_p && TARGET_VSX))
21024 return output_vec_const_move (operands);
21027 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
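/* Editorial illustration (not in the original source): on a
   little-endian target with 128-bit direct moves (power9), a VSX-to-GPR
   TImode register move prints
     mfvsrd %L0,%x1
     mfvsrld %0,%x1
   per the WORDS_BIG_ENDIAN selection above.  */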
21030 /* Validate a 128-bit move. */
21031 bool
21032 rs6000_move_128bit_ok_p (rtx operands[])
21034 machine_mode mode = GET_MODE (operands[0]);
21035 return (gpc_reg_operand (operands[0], mode)
21036 || gpc_reg_operand (operands[1], mode));
21039 /* Return true if a 128-bit move needs to be split. */
21040 bool
21041 rs6000_split_128bit_ok_p (rtx operands[])
21043 if (!reload_completed)
21044 return false;
21046 if (!gpr_or_gpr_p (operands[0], operands[1]))
21047 return false;
21049 if (quad_load_store_p (operands[0], operands[1]))
21050 return false;
21052 return true;
21056 /* Given a comparison operation, return the bit number in CCR to test. We
21057 know this is a valid comparison.
21059 SCC_P is 1 if this is for an scc. That means that %D will have been
21060 used instead of %C, so the bits will be in different places.
21062 Return -1 if OP isn't a valid comparison for some reason. */
21064 int
21065 ccr_bit (rtx op, int scc_p)
21067 enum rtx_code code = GET_CODE (op);
21068 machine_mode cc_mode;
21069 int cc_regnum;
21070 int base_bit;
21071 rtx reg;
21073 if (!COMPARISON_P (op))
21074 return -1;
21076 reg = XEXP (op, 0);
21078 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
21080 cc_mode = GET_MODE (reg);
21081 cc_regnum = REGNO (reg);
21082 base_bit = 4 * (cc_regnum - CR0_REGNO);
21084 validate_condition_mode (code, cc_mode);
21086 /* When generating a sCOND operation, only positive conditions are
21087 allowed. */
21088 gcc_assert (!scc_p
21089 || code == EQ || code == GT || code == LT || code == UNORDERED
21090 || code == GTU || code == LTU);
21092 switch (code)
21094 case NE:
21095 return scc_p ? base_bit + 3 : base_bit + 2;
21096 case EQ:
21097 return base_bit + 2;
21098 case GT: case GTU: case UNLE:
21099 return base_bit + 1;
21100 case LT: case LTU: case UNGE:
21101 return base_bit;
21102 case ORDERED: case UNORDERED:
21103 return base_bit + 3;
21105 case GE: case GEU:
21106 /* If scc, we will have done a cror to put the bit in the
21107 unordered position. So test that bit. For integer, this is ! LT
21108 unless this is an scc insn. */
21109 return scc_p ? base_bit + 3 : base_bit;
21111 case LE: case LEU:
21112 return scc_p ? base_bit + 3 : base_bit + 1;
21114 default:
21115 gcc_unreachable ();
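/* Editorial illustration (not in the original source): for a GT
   comparison in field cr2, base_bit is 4 * 2 = 8, so ccr_bit returns 9
   (base_bit + 1); with scc_p set, the %J operand code then prints
   i + 1 = 10 as the rlinm shift count.  */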
21119 /* Return the GOT register. */
21121 rtx
21122 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
21124 /* The second flow pass currently (June 1999) can't update
21125 regs_ever_live without disturbing other parts of the compiler, so
21126 update it here to make the prolog/epilogue code happy. */
21127 if (!can_create_pseudo_p ()
21128 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
21129 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
21131 crtl->uses_pic_offset_table = 1;
21133 return pic_offset_table_rtx;
21136 static rs6000_stack_t stack_info;
21138 /* Function to init struct machine_function.
21139 This will be called, via a pointer variable,
21140 from push_function_context. */
21142 static struct machine_function *
21143 rs6000_init_machine_status (void)
21145 stack_info.reload_completed = 0;
21146 return ggc_cleared_alloc<machine_function> ();
21149 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
21151 /* Write out a function code label. */
21153 void
21154 rs6000_output_function_entry (FILE *file, const char *fname)
21156 if (fname[0] != '.')
21158 switch (DEFAULT_ABI)
21160 default:
21161 gcc_unreachable ();
21163 case ABI_AIX:
21164 if (DOT_SYMBOLS)
21165 putc ('.', file);
21166 else
21167 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
21168 break;
21170 case ABI_ELFv2:
21171 case ABI_V4:
21172 case ABI_DARWIN:
21173 break;
21177 RS6000_OUTPUT_BASENAME (file, fname);
21180 /* Print an operand. Recognize special options, documented below. */
21182 #if TARGET_ELF
21183 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21184 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21185 #else
21186 #define SMALL_DATA_RELOC "sda21"
21187 #define SMALL_DATA_REG 0
21188 #endif
21190 void
21191 print_operand (FILE *file, rtx x, int code)
21193 int i;
21194 unsigned HOST_WIDE_INT uval;
21196 switch (code)
21198 /* %a is output_address. */
21200 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21201 output_operand. */
21203 case 'D':
21204 /* Like 'J' but get to the GT bit only. */
21205 gcc_assert (REG_P (x));
21207 /* Bit 1 is GT bit. */
21208 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
21210 /* Add one for shift count in rlinm for scc. */
21211 fprintf (file, "%d", i + 1);
21212 return;
21214 case 'e':
21215 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21216 if (! INT_P (x))
21218 output_operand_lossage ("invalid %%e value");
21219 return;
21222 uval = INTVAL (x);
21223 if ((uval & 0xffff) == 0 && uval != 0)
21224 putc ('s', file);
21225 return;
21227 case 'E':
21228 /* X is a CR register. Print the number of the EQ bit of the CR. */
21229 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21230 output_operand_lossage ("invalid %%E value");
21231 else
21232 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
21233 return;
21235 case 'f':
21236 /* X is a CR register. Print the shift count needed to move it
21237 to the high-order four bits. */
21238 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21239 output_operand_lossage ("invalid %%f value");
21240 else
21241 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
21242 return;
21244 case 'F':
21245 /* Similar, but print the count for the rotate in the opposite
21246 direction. */
21247 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21248 output_operand_lossage ("invalid %%F value");
21249 else
21250 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
21251 return;
21253 case 'G':
21254 /* X is a constant integer. If it is negative, print "m",
21255 otherwise print "z". This is to make an aze or ame insn. */
21256 if (GET_CODE (x) != CONST_INT)
21257 output_operand_lossage ("invalid %%G value");
21258 else if (INTVAL (x) >= 0)
21259 putc ('z', file);
21260 else
21261 putc ('m', file);
21262 return;
21264 case 'h':
21265 /* If constant, output low-order five bits. Otherwise, write
21266 normally. */
21267 if (INT_P (x))
21268 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
21269 else
21270 print_operand (file, x, 0);
21271 return;
21273 case 'H':
21274 /* If constant, output low-order six bits. Otherwise, write
21275 normally. */
21276 if (INT_P (x))
21277 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
21278 else
21279 print_operand (file, x, 0);
21280 return;
21282 case 'I':
21283 /* Print `i' if this is a constant, else nothing. */
21284 if (INT_P (x))
21285 putc ('i', file);
21286 return;
21288 case 'j':
21289 /* Write the bit number in CCR for jump. */
21290 i = ccr_bit (x, 0);
21291 if (i == -1)
21292 output_operand_lossage ("invalid %%j code");
21293 else
21294 fprintf (file, "%d", i);
21295 return;
21297 case 'J':
21298 /* Similar, but add one for shift count in rlinm for scc and pass
21299 scc flag to `ccr_bit'. */
21300 i = ccr_bit (x, 1);
21301 if (i == -1)
21302 output_operand_lossage ("invalid %%J code");
21303 else
21304 /* If we want bit 31, write a shift count of zero, not 32. */
21305 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21306 return;
21308 case 'k':
21309 /* X must be a constant. Write the 1's complement of the
21310 constant. */
21311 if (! INT_P (x))
21312 output_operand_lossage ("invalid %%k value");
21313 else
21314 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
21315 return;
21317 case 'K':
21318 /* X must be a symbolic constant on ELF. Write an
21319 expression suitable for an 'addi' that adds in the low 16
21320 bits of the MEM. */
21321 if (GET_CODE (x) == CONST)
21323 if (GET_CODE (XEXP (x, 0)) != PLUS
21324 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
21325 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
21326 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
21327 output_operand_lossage ("invalid %%K value");
21329 print_operand_address (file, x);
21330 fputs ("@l", file);
21331 return;
21333 /* %l is output_asm_label. */
21335 case 'L':
21336 /* Write second word of DImode or DFmode reference. Works on register
21337 or non-indexed memory only. */
21338 if (REG_P (x))
21339 fputs (reg_names[REGNO (x) + 1], file);
21340 else if (MEM_P (x))
21342 machine_mode mode = GET_MODE (x);
21343 /* Handle possible auto-increment. Since it is pre-increment and
21344 we have already done it, we can just use an offset of one word. */
21345 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21346 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21347 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21348 UNITS_PER_WORD));
21349 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21350 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21351 UNITS_PER_WORD));
21352 else
21353 output_address (mode, XEXP (adjust_address_nv (x, SImode,
21354 UNITS_PER_WORD),
21355 0));
21357 if (small_data_operand (x, GET_MODE (x)))
21358 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21359 reg_names[SMALL_DATA_REG]);
21361 return;
21363 case 'N':
21364 /* Write the number of elements in the vector times 4. */
21365 if (GET_CODE (x) != PARALLEL)
21366 output_operand_lossage ("invalid %%N value");
21367 else
21368 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21369 return;
21371 case 'O':
21372 /* Similar, but subtract 1 first. */
21373 if (GET_CODE (x) != PARALLEL)
21374 output_operand_lossage ("invalid %%O value");
21375 else
21376 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21377 return;
21379 case 'p':
21380 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21381 if (! INT_P (x)
21382 || INTVAL (x) < 0
21383 || (i = exact_log2 (INTVAL (x))) < 0)
21384 output_operand_lossage ("invalid %%p value");
21385 else
21386 fprintf (file, "%d", i);
21387 return;
21389 case 'P':
21390 /* The operand must be an indirect memory reference. The result
21391 is the register name. */
21392 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
21393 || REGNO (XEXP (x, 0)) >= 32)
21394 output_operand_lossage ("invalid %%P value");
21395 else
21396 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21397 return;
21399 case 'q':
21400 /* This outputs the logical code corresponding to a boolean
21401 expression. The expression may have one or both operands
21402 negated (if one, only the first one). For condition register
21403 logical operations, it will also treat the negated
21404 CR codes as NOTs, but not handle NOTs of them. */
21406 const char *const *t = 0;
21407 const char *s;
21408 enum rtx_code code = GET_CODE (x);
21409 static const char * const tbl[3][3] = {
21410 { "and", "andc", "nor" },
21411 { "or", "orc", "nand" },
21412 { "xor", "eqv", "xor" } };
21414 if (code == AND)
21415 t = tbl[0];
21416 else if (code == IOR)
21417 t = tbl[1];
21418 else if (code == XOR)
21419 t = tbl[2];
21420 else
21421 output_operand_lossage ("invalid %%q value");
21423 if (GET_CODE (XEXP (x, 0)) != NOT)
21424 s = t[0];
21425 else
21427 if (GET_CODE (XEXP (x, 1)) == NOT)
21428 s = t[2];
21429 else
21430 s = t[1];
21433 fputs (s, file);
21435 return;
21437 case 'Q':
21438 if (! TARGET_MFCRF)
21439 return;
21440 fputc (',', file);
21441 /* FALLTHRU */
21443 case 'R':
21444 /* X is a CR register. Print the mask for `mtcrf'. */
21445 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21446 output_operand_lossage ("invalid %%R value");
21447 else
21448 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21449 return;
21451 case 's':
21452 /* Low 5 bits of 32 - value. */
21453 if (! INT_P (x))
21454 output_operand_lossage ("invalid %%s value");
21455 else
21456 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21457 return;
21459 case 't':
21460 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21461 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
21463 /* Bit 3 is OV bit. */
21464 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21466 /* If we want bit 31, write a shift count of zero, not 32. */
21467 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21468 return;
21470 case 'T':
21471 /* Print the symbolic name of a branch target register. */
21472 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21473 && REGNO (x) != CTR_REGNO))
21474 output_operand_lossage ("invalid %%T value");
21475 else if (REGNO (x) == LR_REGNO)
21476 fputs ("lr", file);
21477 else
21478 fputs ("ctr", file);
21479 return;
21481 case 'u':
21482 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21483 for use in unsigned operand. */
21484 if (! INT_P (x))
21486 output_operand_lossage ("invalid %%u value");
21487 return;
21490 uval = INTVAL (x);
21491 if ((uval & 0xffff) == 0)
21492 uval >>= 16;
21494 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21495 return;
21497 case 'v':
21498 /* High-order 16 bits of constant for use in signed operand. */
21499 if (! INT_P (x))
21500 output_operand_lossage ("invalid %%v value");
21501 else
21502 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21503 (INTVAL (x) >> 16) & 0xffff);
21504 return;
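/* Illustrative values for the two cases above: with x == 0x12340000,
   %u prints 0x1234 (the non-zero high half); with x == 0x5678 it
   prints 0x5678.  %v always prints the high half, so 0x12345678 yields
   0x1234, as needed for an addis-style operand.  */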
21506 case 'U':
21507 /* Print `u' if this has an auto-increment or auto-decrement. */
21508 if (MEM_P (x)
21509 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21510 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21511 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21512 putc ('u', file);
21513 return;
21515 case 'V':
21516 /* Print the trap code for this operand. */
21517 switch (GET_CODE (x))
21519 case EQ:
21520 fputs ("eq", file); /* 4 */
21521 break;
21522 case NE:
21523 fputs ("ne", file); /* 24 */
21524 break;
21525 case LT:
21526 fputs ("lt", file); /* 16 */
21527 break;
21528 case LE:
21529 fputs ("le", file); /* 20 */
21530 break;
21531 case GT:
21532 fputs ("gt", file); /* 8 */
21533 break;
21534 case GE:
21535 fputs ("ge", file); /* 12 */
21536 break;
21537 case LTU:
21538 fputs ("llt", file); /* 2 */
21539 break;
21540 case LEU:
21541 fputs ("lle", file); /* 6 */
21542 break;
21543 case GTU:
21544 fputs ("lgt", file); /* 1 */
21545 break;
21546 case GEU:
21547 fputs ("lge", file); /* 5 */
21548 break;
21549 default:
21550 gcc_unreachable ();
21552 break;
21554 case 'w':
21555 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21556 normally. */
21557 if (INT_P (x))
21558 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21559 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21560 else
21561 print_operand (file, x, 0);
21562 return;
21564 case 'x':
21565 /* X is a FPR or Altivec register used in a VSX context. */
21566 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21567 output_operand_lossage ("invalid %%x value");
21568 else
21570 int reg = REGNO (x);
21571 int vsx_reg = (FP_REGNO_P (reg)
21572 ? reg - 32
21573 : reg - FIRST_ALTIVEC_REGNO + 32);
21575 #ifdef TARGET_REGNAMES
21576 if (TARGET_REGNAMES)
21577 fprintf (file, "%%vs%d", vsx_reg);
21578 else
21579 #endif
21580 fprintf (file, "%d", vsx_reg);
21582 return;
21584 case 'X':
21585 if (MEM_P (x)
21586 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21587 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21588 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21589 putc ('x', file);
21590 return;
21592 case 'Y':
21593 /* Like 'L', for third word of TImode/PTImode */
21594 if (REG_P (x))
21595 fputs (reg_names[REGNO (x) + 2], file);
21596 else if (MEM_P (x))
21598 machine_mode mode = GET_MODE (x);
21599 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21600 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21601 output_address (mode, plus_constant (Pmode,
21602 XEXP (XEXP (x, 0), 0), 8));
21603 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21604 output_address (mode, plus_constant (Pmode,
21605 XEXP (XEXP (x, 0), 1), 8));
21606 else
21607 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21608 if (small_data_operand (x, GET_MODE (x)))
21609 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21610 reg_names[SMALL_DATA_REG]);
21612 return;
21614 case 'z':
21615 /* X is a SYMBOL_REF. Write out the name preceded by a
21616 period and without any trailing data in brackets. Used for function
21617 names. If we are configured for System V (or the embedded ABI) on
21618 the PowerPC, do not emit the period, since those systems do not use
21619 TOCs and the like. */
21620 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21622 /* For macho, check to see if we need a stub. */
21623 if (TARGET_MACHO)
21625 const char *name = XSTR (x, 0);
21626 #if TARGET_MACHO
21627 if (darwin_emit_branch_islands
21628 && MACHOPIC_INDIRECT
21629 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21630 name = machopic_indirection_name (x, /*stub_p=*/true);
21631 #endif
21632 assemble_name (file, name);
21634 else if (!DOT_SYMBOLS)
21635 assemble_name (file, XSTR (x, 0));
21636 else
21637 rs6000_output_function_entry (file, XSTR (x, 0));
21638 return;
21640 case 'Z':
21641 /* Like 'L', for last word of TImode/PTImode. */
21642 if (REG_P (x))
21643 fputs (reg_names[REGNO (x) + 3], file);
21644 else if (MEM_P (x))
21646 machine_mode mode = GET_MODE (x);
21647 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21648 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21649 output_address (mode, plus_constant (Pmode,
21650 XEXP (XEXP (x, 0), 0), 12));
21651 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21652 output_address (mode, plus_constant (Pmode,
21653 XEXP (XEXP (x, 0), 1), 12));
21654 else
21655 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21656 if (small_data_operand (x, GET_MODE (x)))
21657 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21658 reg_names[SMALL_DATA_REG]);
21660 return;
21662 /* Print AltiVec memory operand. */
21663 case 'y':
21665 rtx tmp;
21667 gcc_assert (MEM_P (x));
21669 tmp = XEXP (x, 0);
21671 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
21672 && GET_CODE (tmp) == AND
21673 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21674 && INTVAL (XEXP (tmp, 1)) == -16)
21675 tmp = XEXP (tmp, 0);
21676 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21677 && GET_CODE (tmp) == PRE_MODIFY)
21678 tmp = XEXP (tmp, 1);
21679 if (REG_P (tmp))
21680 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21681 else
21683 if (GET_CODE (tmp) != PLUS
21684 || !REG_P (XEXP (tmp, 0))
21685 || !REG_P (XEXP (tmp, 1)))
21687 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21688 break;
21691 if (REGNO (XEXP (tmp, 0)) == 0)
21692 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21693 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21694 else
21695 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21696 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21698 break;
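/* Illustrative outputs for the cases above: an AltiVec load whose
   address is (and (reg 9) (const_int -16)) prints "0,9", matching the
   RA,RB operands of lvx; (plus (reg 9) (reg 10)) prints "9,10", and
   the two registers are swapped when r0 comes first, since r0 in the
   RA slot reads as literal zero.  */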
21701 case 0:
21702 if (REG_P (x))
21703 fprintf (file, "%s", reg_names[REGNO (x)]);
21704 else if (MEM_P (x))
21706 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21707 know the width from the mode. */
21708 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21709 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21710 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21711 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21712 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21713 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21714 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21715 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21716 else
21717 output_address (GET_MODE (x), XEXP (x, 0));
21719 else
21721 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21722 /* This hack along with a corresponding hack in
21723 rs6000_output_addr_const_extra arranges to output addends
21724 where the assembler expects to find them. e.g.
21725 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21726 without this hack would be output as "x@toc+4". We
21727 want "x+4@toc". */
21728 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21729 else
21730 output_addr_const (file, x);
21732 return;
21734 case '&':
21735 if (const char *name = get_some_local_dynamic_name ())
21736 assemble_name (file, name);
21737 else
21738 output_operand_lossage ("'%%&' used without any "
21739 "local dynamic TLS references");
21740 return;
21742 default:
21743 output_operand_lossage ("invalid %%xn code");
21747 /* Print the address of an operand. */
21749 void
21750 print_operand_address (FILE *file, rtx x)
21752 if (REG_P (x))
21753 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21754 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21755 || GET_CODE (x) == LABEL_REF)
21757 output_addr_const (file, x);
21758 if (small_data_operand (x, GET_MODE (x)))
21759 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21760 reg_names[SMALL_DATA_REG]);
21761 else
21762 gcc_assert (!TARGET_TOC);
21764 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21765 && REG_P (XEXP (x, 1)))
21767 if (REGNO (XEXP (x, 0)) == 0)
21768 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21769 reg_names[ REGNO (XEXP (x, 0)) ]);
21770 else
21771 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21772 reg_names[ REGNO (XEXP (x, 1)) ]);
21774 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21775 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21776 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21777 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21778 #if TARGET_MACHO
21779 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21780 && CONSTANT_P (XEXP (x, 1)))
21782 fprintf (file, "lo16(");
21783 output_addr_const (file, XEXP (x, 1));
21784 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21786 #endif
21787 #if TARGET_ELF
21788 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21789 && CONSTANT_P (XEXP (x, 1)))
21791 output_addr_const (file, XEXP (x, 1));
21792 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21794 #endif
21795 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21797 /* This hack along with a corresponding hack in
21798 rs6000_output_addr_const_extra arranges to output addends
21799 where the assembler expects to find them. e.g.
21800 (lo_sum (reg 9)
21801 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21802 without this hack would be output as "x@toc+8@l(9)". We
21803 want "x+8@toc@l(9)". */
21804 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21805 if (GET_CODE (x) == LO_SUM)
21806 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21807 else
21808 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21810 else
21811 gcc_unreachable ();
21814 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21816 static bool
21817 rs6000_output_addr_const_extra (FILE *file, rtx x)
21819 if (GET_CODE (x) == UNSPEC)
21820 switch (XINT (x, 1))
21822 case UNSPEC_TOCREL:
21823 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21824 && REG_P (XVECEXP (x, 0, 1))
21825 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21826 output_addr_const (file, XVECEXP (x, 0, 0));
21827 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21829 if (INTVAL (tocrel_offset_oac) >= 0)
21830 fprintf (file, "+");
21831 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21833 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21835 putc ('-', file);
21836 assemble_name (file, toc_label_name);
21837 need_toc_init = 1;
21839 else if (TARGET_ELF)
21840 fputs ("@toc", file);
21841 return true;
21843 #if TARGET_MACHO
21844 case UNSPEC_MACHOPIC_OFFSET:
21845 output_addr_const (file, XVECEXP (x, 0, 0));
21846 putc ('-', file);
21847 machopic_output_function_base_name (file);
21848 return true;
21849 #endif
21851 return false;
21854 /* Target hook for assembling integer objects. The PowerPC version has
21855 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21856 is defined. It also needs to handle DI-mode objects on 64-bit
21857 targets. */
21859 static bool
21860 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21862 #ifdef RELOCATABLE_NEEDS_FIXUP
21863 /* Special handling for SI values. */
21864 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21866 static int recurse = 0;
21868 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21869 the .fixup section. Since the TOC section is already relocated, we
21870 don't need to mark it here. We used to skip the text section, but it
21871 should never be valid for relocated addresses to be placed in the text
21872 section. */
21873 if (DEFAULT_ABI == ABI_V4
21874 && (TARGET_RELOCATABLE || flag_pic > 1)
21875 && in_section != toc_section
21876 && !recurse
21877 && !CONST_SCALAR_INT_P (x)
21878 && CONSTANT_P (x))
21880 char buf[256];
21882 recurse = 1;
21883 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21884 fixuplabelno++;
21885 ASM_OUTPUT_LABEL (asm_out_file, buf);
21886 fprintf (asm_out_file, "\t.long\t(");
21887 output_addr_const (asm_out_file, x);
21888 fprintf (asm_out_file, ")@fixup\n");
21889 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21890 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21891 fprintf (asm_out_file, "\t.long\t");
21892 assemble_name (asm_out_file, buf);
21893 fprintf (asm_out_file, "\n\t.previous\n");
21894 recurse = 0;
21895 return true;
21897 /* Remove initial .'s to turn a -mcall-aixdesc function
21898 address into the address of the descriptor, not the function
21899 itself. */
21900 else if (GET_CODE (x) == SYMBOL_REF
21901 && XSTR (x, 0)[0] == '.'
21902 && DEFAULT_ABI == ABI_AIX)
21904 const char *name = XSTR (x, 0);
21905 while (*name == '.')
21906 name++;
21908 fprintf (asm_out_file, "\t.long\t%s\n", name);
21909 return true;
21912 #endif /* RELOCATABLE_NEEDS_FIXUP */
21913 return default_assemble_integer (x, size, aligned_p);
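/* For the -mrelocatable path above, the emitted assembly looks like
   this (label name illustrative):

	.LCP0:
		.long (sym)@fixup
		.section ".fixup","aw"
		.align 2
		.long .LCP0
		.previous

   so the startup code can find and relocate the word at .LCP0.  */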
21916 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21917 /* Emit an assembler directive to set symbol visibility for DECL to
21918 VISIBILITY_TYPE. */
21920 static void
21921 rs6000_assemble_visibility (tree decl, int vis)
21923 if (TARGET_XCOFF)
21924 return;
21926 /* Functions need to have their entry point symbol visibility set as
21927 well as their descriptor symbol visibility. */
21928 if (DEFAULT_ABI == ABI_AIX
21929 && DOT_SYMBOLS
21930 && TREE_CODE (decl) == FUNCTION_DECL)
21932 static const char * const visibility_types[] = {
21933 NULL, "protected", "hidden", "internal"
21936 const char *name, *type;
21938 name = ((* targetm.strip_name_encoding)
21939 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21940 type = visibility_types[vis];
21942 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21943 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21945 else
21946 default_assemble_visibility (decl, vis);
21948 #endif
21950 enum rtx_code
21951 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21953 /* Reversal of FP compares takes care -- an ordered compare
21954 becomes an unordered compare and vice versa. */
21955 if (mode == CCFPmode
21956 && (!flag_finite_math_only
21957 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21958 || code == UNEQ || code == LTGT))
21959 return reverse_condition_maybe_unordered (code);
21960 else
21961 return reverse_condition (code);
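/* For example, reversing GE in CCFPmode yields UNLT, so the unordered
   (NaN) case stays on the correct side of the branch, while reversing
   GE in an integer mode yields plain LT.  */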
21964 /* Generate a compare for CODE. Return a brand-new rtx that
21965 represents the result of the compare. */
21967 static rtx
21968 rs6000_generate_compare (rtx cmp, machine_mode mode)
21970 machine_mode comp_mode;
21971 rtx compare_result;
21972 enum rtx_code code = GET_CODE (cmp);
21973 rtx op0 = XEXP (cmp, 0);
21974 rtx op1 = XEXP (cmp, 1);
21976 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21977 comp_mode = CCmode;
21978 else if (FLOAT_MODE_P (mode))
21979 comp_mode = CCFPmode;
21980 else if (code == GTU || code == LTU
21981 || code == GEU || code == LEU)
21982 comp_mode = CCUNSmode;
21983 else if ((code == EQ || code == NE)
21984 && unsigned_reg_p (op0)
21985 && (unsigned_reg_p (op1)
21986 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21987 /* These are unsigned values; perhaps there will be a later
21988 ordering compare that can be shared with this one. */
21989 comp_mode = CCUNSmode;
21990 else
21991 comp_mode = CCmode;
21993 /* If we have an unsigned compare, make sure we don't have a signed value as
21994 an immediate. */
21995 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21996 && INTVAL (op1) < 0)
21998 op0 = copy_rtx_if_shared (op0);
21999 op1 = force_reg (GET_MODE (op0), op1);
22000 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
22003 /* First, the compare. */
22004 compare_result = gen_reg_rtx (comp_mode);
22006 /* IEEE 128-bit support in VSX registers when we do not have hardware
22007 support. */
22008 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
22010 rtx libfunc = NULL_RTX;
22011 bool check_nan = false;
22012 rtx dest;
22014 switch (code)
22016 case EQ:
22017 case NE:
22018 libfunc = optab_libfunc (eq_optab, mode);
22019 break;
22021 case GT:
22022 case GE:
22023 libfunc = optab_libfunc (ge_optab, mode);
22024 break;
22026 case LT:
22027 case LE:
22028 libfunc = optab_libfunc (le_optab, mode);
22029 break;
22031 case UNORDERED:
22032 case ORDERED:
22033 libfunc = optab_libfunc (unord_optab, mode);
22034 code = (code == UNORDERED) ? NE : EQ;
22035 break;
22037 case UNGE:
22038 case UNGT:
22039 check_nan = true;
22040 libfunc = optab_libfunc (ge_optab, mode);
22041 code = (code == UNGE) ? GE : GT;
22042 break;
22044 case UNLE:
22045 case UNLT:
22046 check_nan = true;
22047 libfunc = optab_libfunc (le_optab, mode);
22048 code = (code == UNLE) ? LE : LT;
22049 break;
22051 case UNEQ:
22052 case LTGT:
22053 check_nan = true;
22054 libfunc = optab_libfunc (eq_optab, mode);
22055 code = (code == UNEQ) ? EQ : NE;
22056 break;
22058 default:
22059 gcc_unreachable ();
22062 gcc_assert (libfunc);
22064 if (!check_nan)
22065 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22066 SImode, op0, mode, op1, mode);
22068 /* The library signals an exception for signalling NaNs, so we need to
22069 handle isgreater, etc. by first checking isordered. */
22070 else
22072 rtx ne_rtx, normal_dest, unord_dest;
22073 rtx unord_func = optab_libfunc (unord_optab, mode);
22074 rtx join_label = gen_label_rtx ();
22075 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
22076 rtx unord_cmp = gen_reg_rtx (comp_mode);
22079 /* Test for either value being a NaN. */
22080 gcc_assert (unord_func);
22081 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
22082 SImode, op0, mode, op1, mode);
22084 /* Set the value to 1 (the answer for the unordered case), and jump
22085 to the join label if either value is a NaN. */
22086 dest = gen_reg_rtx (SImode);
22087 emit_move_insn (dest, const1_rtx);
22088 emit_insn (gen_rtx_SET (unord_cmp,
22089 gen_rtx_COMPARE (comp_mode, unord_dest,
22090 const0_rtx)));
22092 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
22093 emit_jump_insn (gen_rtx_SET (pc_rtx,
22094 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
22095 join_ref,
22096 pc_rtx)));
22098 /* Do the normal comparison, knowing that the values are not
22099 NaNs. */
22100 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22101 SImode, op0, mode, op1, mode);
22103 emit_insn (gen_cstoresi4 (dest,
22104 gen_rtx_fmt_ee (code, SImode, normal_dest,
22105 const0_rtx),
22106 normal_dest, const0_rtx));
22108 /* Join NaN and non-NaN paths. Compare dest against 0. */
22109 emit_label (join_label);
22110 code = NE;
22113 emit_insn (gen_rtx_SET (compare_result,
22114 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
22117 else
22119 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
22120 CLOBBERs to match cmptf_internal2 pattern. */
22121 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
22122 && FLOAT128_IBM_P (GET_MODE (op0))
22123 && TARGET_HARD_FLOAT)
22124 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22125 gen_rtvec (10,
22126 gen_rtx_SET (compare_result,
22127 gen_rtx_COMPARE (comp_mode, op0, op1)),
22128 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22129 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22130 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22131 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22132 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22133 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22134 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22135 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22136 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
22137 else if (GET_CODE (op1) == UNSPEC
22138 && XINT (op1, 1) == UNSPEC_SP_TEST)
22140 rtx op1b = XVECEXP (op1, 0, 0);
22141 comp_mode = CCEQmode;
22142 compare_result = gen_reg_rtx (CCEQmode);
22143 if (TARGET_64BIT)
22144 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
22145 else
22146 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
22148 else
22149 emit_insn (gen_rtx_SET (compare_result,
22150 gen_rtx_COMPARE (comp_mode, op0, op1)));
22153 /* Some kinds of FP comparisons need an OR operation;
22154 under flag_finite_math_only we don't bother. */
22155 if (FLOAT_MODE_P (mode)
22156 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22157 && !flag_finite_math_only
22158 && (code == LE || code == GE
22159 || code == UNEQ || code == LTGT
22160 || code == UNGT || code == UNLT))
22162 enum rtx_code or1, or2;
22163 rtx or1_rtx, or2_rtx, compare2_rtx;
22164 rtx or_result = gen_reg_rtx (CCEQmode);
22166 switch (code)
22168 case LE: or1 = LT; or2 = EQ; break;
22169 case GE: or1 = GT; or2 = EQ; break;
22170 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22171 case LTGT: or1 = LT; or2 = GT; break;
22172 case UNGT: or1 = UNORDERED; or2 = GT; break;
22173 case UNLT: or1 = UNORDERED; or2 = LT; break;
22174 default: gcc_unreachable ();
22176 validate_condition_mode (or1, comp_mode);
22177 validate_condition_mode (or2, comp_mode);
22178 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22179 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22180 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22181 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22182 const_true_rtx);
22183 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22185 compare_result = or_result;
22186 code = EQ;
22189 validate_condition_mode (code, GET_MODE (compare_result));
22191 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
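/* A minimal sketch of what the check_nan path above emits for
   "a UNGT b" on KFmode, assuming the usual libgcc helper names
   (__unordkf2 / __gekf2 -- names illustrative):

	dest = 1;                      <- the unordered answer
	if (__unordkf2 (a, b) != 0)    <- either operand a NaN?
	  goto join;
	dest = __gekf2 (a, b) > 0;     <- ordinary greater-than
     join:
	CR = compare (dest, 0);        <- caller then tests dest != 0

   For UNORDERED/ORDERED no pre-check is needed; the __unordkf2 result
   is compared directly.  */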
22195 /* Return the diagnostic message string if the binary operation OP is
22196 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22198 static const char*
22199 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22200 const_tree type1,
22201 const_tree type2)
22203 machine_mode mode1 = TYPE_MODE (type1);
22204 machine_mode mode2 = TYPE_MODE (type2);
22206 /* For complex modes, use the inner type. */
22207 if (COMPLEX_MODE_P (mode1))
22208 mode1 = GET_MODE_INNER (mode1);
22210 if (COMPLEX_MODE_P (mode2))
22211 mode2 = GET_MODE_INNER (mode2);
22213 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22214 double to intermix unless -mfloat128-convert. */
22215 if (mode1 == mode2)
22216 return NULL;
22218 if (!TARGET_FLOAT128_CVT)
22220 if ((mode1 == KFmode && mode2 == IFmode)
22221 || (mode1 == IFmode && mode2 == KFmode))
22222 return N_("__float128 and __ibm128 cannot be used in the same "
22223 "expression");
22225 if (TARGET_IEEEQUAD
22226 && ((mode1 == IFmode && mode2 == TFmode)
22227 || (mode1 == TFmode && mode2 == IFmode)))
22228 return N_("__ibm128 and long double cannot be used in the same "
22229 "expression");
22231 if (!TARGET_IEEEQUAD
22232 && ((mode1 == KFmode && mode2 == TFmode)
22233 || (mode1 == TFmode && mode2 == KFmode)))
22234 return N_("__float128 and long double cannot be used in the same "
22235 "expression");
22238 return NULL;
22242 /* Expand floating point conversion to/from __float128 and __ibm128. */
22244 void
22245 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22247 machine_mode dest_mode = GET_MODE (dest);
22248 machine_mode src_mode = GET_MODE (src);
22249 convert_optab cvt = unknown_optab;
22250 bool do_move = false;
22251 rtx libfunc = NULL_RTX;
22252 rtx dest2;
22253 typedef rtx (*rtx_2func_t) (rtx, rtx);
22254 rtx_2func_t hw_convert = (rtx_2func_t)0;
22255 size_t kf_or_tf;
22257 struct hw_conv_t {
22258 rtx_2func_t from_df;
22259 rtx_2func_t from_sf;
22260 rtx_2func_t from_si_sign;
22261 rtx_2func_t from_si_uns;
22262 rtx_2func_t from_di_sign;
22263 rtx_2func_t from_di_uns;
22264 rtx_2func_t to_df;
22265 rtx_2func_t to_sf;
22266 rtx_2func_t to_si_sign;
22267 rtx_2func_t to_si_uns;
22268 rtx_2func_t to_di_sign;
22269 rtx_2func_t to_di_uns;
22270 } hw_conversions[2] = {
22271 /* conversions to/from KFmode */
22273 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22274 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22275 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22276 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22277 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22278 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22279 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22280 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22281 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22282 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22283 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22284 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22287 /* conversions to/from TFmode */
22289 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22290 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22291 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22292 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22293 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22294 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22295 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22296 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22297 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22298 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22299 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22300 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22304 if (dest_mode == src_mode)
22305 gcc_unreachable ();
22307 /* Eliminate memory operations. */
22308 if (MEM_P (src))
22309 src = force_reg (src_mode, src);
22311 if (MEM_P (dest))
22313 rtx tmp = gen_reg_rtx (dest_mode);
22314 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22315 rs6000_emit_move (dest, tmp, dest_mode);
22316 return;
22319 /* Convert to IEEE 128-bit floating point. */
22320 if (FLOAT128_IEEE_P (dest_mode))
22322 if (dest_mode == KFmode)
22323 kf_or_tf = 0;
22324 else if (dest_mode == TFmode)
22325 kf_or_tf = 1;
22326 else
22327 gcc_unreachable ();
22329 switch (src_mode)
22331 case E_DFmode:
22332 cvt = sext_optab;
22333 hw_convert = hw_conversions[kf_or_tf].from_df;
22334 break;
22336 case E_SFmode:
22337 cvt = sext_optab;
22338 hw_convert = hw_conversions[kf_or_tf].from_sf;
22339 break;
22341 case E_KFmode:
22342 case E_IFmode:
22343 case E_TFmode:
22344 if (FLOAT128_IBM_P (src_mode))
22345 cvt = sext_optab;
22346 else
22347 do_move = true;
22348 break;
22350 case E_SImode:
22351 if (unsigned_p)
22353 cvt = ufloat_optab;
22354 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22356 else
22358 cvt = sfloat_optab;
22359 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22361 break;
22363 case E_DImode:
22364 if (unsigned_p)
22366 cvt = ufloat_optab;
22367 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22369 else
22371 cvt = sfloat_optab;
22372 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22374 break;
22376 default:
22377 gcc_unreachable ();
22381 /* Convert from IEEE 128-bit floating point. */
22382 else if (FLOAT128_IEEE_P (src_mode))
22384 if (src_mode == KFmode)
22385 kf_or_tf = 0;
22386 else if (src_mode == TFmode)
22387 kf_or_tf = 1;
22388 else
22389 gcc_unreachable ();
22391 switch (dest_mode)
22393 case E_DFmode:
22394 cvt = trunc_optab;
22395 hw_convert = hw_conversions[kf_or_tf].to_df;
22396 break;
22398 case E_SFmode:
22399 cvt = trunc_optab;
22400 hw_convert = hw_conversions[kf_or_tf].to_sf;
22401 break;
22403 case E_KFmode:
22404 case E_IFmode:
22405 case E_TFmode:
22406 if (FLOAT128_IBM_P (dest_mode))
22407 cvt = trunc_optab;
22408 else
22409 do_move = true;
22410 break;
22412 case E_SImode:
22413 if (unsigned_p)
22415 cvt = ufix_optab;
22416 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22418 else
22420 cvt = sfix_optab;
22421 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22423 break;
22425 case E_DImode:
22426 if (unsigned_p)
22428 cvt = ufix_optab;
22429 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22431 else
22433 cvt = sfix_optab;
22434 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22436 break;
22438 default:
22439 gcc_unreachable ();
22443 /* Both IBM format. */
22444 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22445 do_move = true;
22447 else
22448 gcc_unreachable ();
22450 /* Handle conversion between TFmode/KFmode. */
22451 if (do_move)
22452 emit_move_insn (dest, gen_lowpart (dest_mode, src));
22454 /* Handle conversion if we have hardware support. */
22455 else if (TARGET_FLOAT128_HW && hw_convert)
22456 emit_insn ((hw_convert) (dest, src));
22458 /* Call an external function to do the conversion. */
22459 else if (cvt != unknown_optab)
22461 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22462 gcc_assert (libfunc != NULL_RTX);
22464 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22465 src, src_mode);
22467 gcc_assert (dest2 != NULL_RTX);
22468 if (!rtx_equal_p (dest, dest2))
22469 emit_move_insn (dest, dest2);
22472 else
22473 gcc_unreachable ();
22475 return;
22479 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22480 can be used as that dest register. Return the dest register. */
22483 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22485 if (op2 == const0_rtx)
22486 return op1;
22488 if (GET_CODE (scratch) == SCRATCH)
22489 scratch = gen_reg_rtx (mode);
22491 if (logical_operand (op2, mode))
22492 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22493 else
22494 emit_insn (gen_rtx_SET (scratch,
22495 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22497 return scratch;
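/* Illustrative uses of the helper above: testing r3 == 0x20 emits
   "xori scratch,r3,0x20", so scratch is zero exactly when the operands
   are equal; for a constant that is not a logical operand, say
   0x12345, scratch = r3 + (-0x12345) is emitted instead.  */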
22500 void
22501 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22503 rtx condition_rtx;
22504 machine_mode op_mode;
22505 enum rtx_code cond_code;
22506 rtx result = operands[0];
22508 condition_rtx = rs6000_generate_compare (operands[1], mode);
22509 cond_code = GET_CODE (condition_rtx);
22511 if (cond_code == NE
22512 || cond_code == GE || cond_code == LE
22513 || cond_code == GEU || cond_code == LEU
22514 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22516 rtx not_result = gen_reg_rtx (CCEQmode);
22517 rtx not_op, rev_cond_rtx;
22518 machine_mode cc_mode;
22520 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22522 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22523 SImode, XEXP (condition_rtx, 0), const0_rtx);
22524 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22525 emit_insn (gen_rtx_SET (not_result, not_op));
22526 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22529 op_mode = GET_MODE (XEXP (operands[1], 0));
22530 if (op_mode == VOIDmode)
22531 op_mode = GET_MODE (XEXP (operands[1], 1));
22533 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22535 PUT_MODE (condition_rtx, DImode);
22536 convert_move (result, condition_rtx, 0);
22538 else
22540 PUT_MODE (condition_rtx, SImode);
22541 emit_insn (gen_rtx_SET (result, condition_rtx));
22545 /* Emit a conditional branch to the label in operands[3], based on the comparison in operands[0]. */
22547 void
22548 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22550 rtx condition_rtx, loc_ref;
22552 condition_rtx = rs6000_generate_compare (operands[0], mode);
22553 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22554 emit_jump_insn (gen_rtx_SET (pc_rtx,
22555 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22556 loc_ref, pc_rtx)));
22559 /* Return the string to output a conditional branch to LABEL, which is
22560 the operand template of the label, or NULL if the branch is really a
22561 conditional return.
22563 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22564 condition code register and its mode specifies what kind of
22565 comparison we made.
22567 REVERSED is nonzero if we should reverse the sense of the comparison.
22569 INSN is the insn. */
22571 char *
22572 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22574 static char string[64];
22575 enum rtx_code code = GET_CODE (op);
22576 rtx cc_reg = XEXP (op, 0);
22577 machine_mode mode = GET_MODE (cc_reg);
22578 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22579 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22580 int really_reversed = reversed ^ need_longbranch;
22581 char *s = string;
22582 const char *ccode;
22583 const char *pred;
22584 rtx note;
22586 validate_condition_mode (code, mode);
22588 /* Work out which way this really branches. We could use
22589 reverse_condition_maybe_unordered here always but this
22590 makes the resulting assembler clearer. */
22591 if (really_reversed)
22593 /* Reversal of FP compares takes care -- an ordered compare
22594 becomes an unordered compare and vice versa. */
22595 if (mode == CCFPmode)
22596 code = reverse_condition_maybe_unordered (code);
22597 else
22598 code = reverse_condition (code);
22601 switch (code)
22603 /* Not all of these are actually distinct opcodes, but
22604 we distinguish them for clarity of the resulting assembler. */
22605 case NE: case LTGT:
22606 ccode = "ne"; break;
22607 case EQ: case UNEQ:
22608 ccode = "eq"; break;
22609 case GE: case GEU:
22610 ccode = "ge"; break;
22611 case GT: case GTU: case UNGT:
22612 ccode = "gt"; break;
22613 case LE: case LEU:
22614 ccode = "le"; break;
22615 case LT: case LTU: case UNLT:
22616 ccode = "lt"; break;
22617 case UNORDERED: ccode = "un"; break;
22618 case ORDERED: ccode = "nu"; break;
22619 case UNGE: ccode = "nl"; break;
22620 case UNLE: ccode = "ng"; break;
22621 default:
22622 gcc_unreachable ();
22625 /* Maybe we have a guess as to how likely the branch is. */
22626 pred = "";
22627 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22628 if (note != NULL_RTX)
22630 /* PROB is the difference from 50%. */
22631 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22632 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22634 /* Only hint for highly probable/improbable branches on newer cpus when
22635 we have real profile data, as static prediction overrides processor
22636 dynamic prediction. For older cpus we may as well always hint, but
22637 assume not taken for branches that are very close to 50% as a
22638 mispredicted taken branch is more expensive than a
22639 mispredicted not-taken branch. */
22640 if (rs6000_always_hint
22641 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22642 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22643 && br_prob_note_reliable_p (note)))
22645 if (abs (prob) > REG_BR_PROB_BASE / 20
22646 && ((prob > 0) ^ need_longbranch))
22647 pred = "+";
22648 else
22649 pred = "-";
22653 if (label == NULL)
22654 s += sprintf (s, "b%slr%s ", ccode, pred);
22655 else
22656 s += sprintf (s, "b%s%s ", ccode, pred);
22658 /* We need to escape any '%' characters in the reg_names string.
22659 Assume they'd only be the first character.... */
22660 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22661 *s++ = '%';
22662 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22664 if (label != NULL)
22666 /* If the branch distance was too far, we may have to use an
22667 unconditional branch to go the distance. */
22668 if (need_longbranch)
22669 s += sprintf (s, ",$+8\n\tb %s", label);
22670 else
22671 s += sprintf (s, ",%s", label);
22674 return string;
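/* Example outputs (illustrative): a likely-taken EQ branch on cr0
   yields "beq+ 0,.L5"; when the label is out of conditional-branch
   range, the reversed long form is produced instead: "bne 0,$+8"
   followed by "b .L5".  */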
22677 /* Return insn for VSX or Altivec comparisons. */
22679 static rtx
22680 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22682 rtx mask;
22683 machine_mode mode = GET_MODE (op0);
22685 switch (code)
22687 default:
22688 break;
22690 case GE:
22691 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22692 return NULL_RTX;
22693 /* FALLTHRU */
22695 case EQ:
22696 case GT:
22697 case GTU:
22698 case ORDERED:
22699 case UNORDERED:
22700 case UNEQ:
22701 case LTGT:
22702 mask = gen_reg_rtx (mode);
22703 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22704 return mask;
22707 return NULL_RTX;
22710 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22711 DMODE is the expected destination mode. This is a recursive function. */
22713 static rtx
22714 rs6000_emit_vector_compare (enum rtx_code rcode,
22715 rtx op0, rtx op1,
22716 machine_mode dmode)
22718 rtx mask;
22719 bool swap_operands = false;
22720 bool try_again = false;
22722 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22723 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22725 /* See if the comparison works as is. */
22726 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22727 if (mask)
22728 return mask;
22730 switch (rcode)
22732 case LT:
22733 rcode = GT;
22734 swap_operands = true;
22735 try_again = true;
22736 break;
22737 case LTU:
22738 rcode = GTU;
22739 swap_operands = true;
22740 try_again = true;
22741 break;
22742 case NE:
22743 case UNLE:
22744 case UNLT:
22745 case UNGE:
22746 case UNGT:
22747 /* Invert condition and try again.
22748 e.g., A != B becomes ~(A==B). */
22750 enum rtx_code rev_code;
22751 enum insn_code nor_code;
22752 rtx mask2;
22754 rev_code = reverse_condition_maybe_unordered (rcode);
22755 if (rev_code == UNKNOWN)
22756 return NULL_RTX;
22758 nor_code = optab_handler (one_cmpl_optab, dmode);
22759 if (nor_code == CODE_FOR_nothing)
22760 return NULL_RTX;
22762 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22763 if (!mask2)
22764 return NULL_RTX;
22766 mask = gen_reg_rtx (dmode);
22767 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22768 return mask;
22770 break;
22771 case GE:
22772 case GEU:
22773 case LE:
22774 case LEU:
22775 /* Try GT/GTU/LT/LTU OR EQ */
22777 rtx c_rtx, eq_rtx;
22778 enum insn_code ior_code;
22779 enum rtx_code new_code;
22781 switch (rcode)
22783 case GE:
22784 new_code = GT;
22785 break;
22787 case GEU:
22788 new_code = GTU;
22789 break;
22791 case LE:
22792 new_code = LT;
22793 break;
22795 case LEU:
22796 new_code = LTU;
22797 break;
22799 default:
22800 gcc_unreachable ();
22803 ior_code = optab_handler (ior_optab, dmode);
22804 if (ior_code == CODE_FOR_nothing)
22805 return NULL_RTX;
22807 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22808 if (!c_rtx)
22809 return NULL_RTX;
22811 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22812 if (!eq_rtx)
22813 return NULL_RTX;
22815 mask = gen_reg_rtx (dmode);
22816 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22817 return mask;
22819 break;
22820 default:
22821 return NULL_RTX;
22824 if (try_again)
22826 if (swap_operands)
22827 std::swap (op0, op1);
22829 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22830 if (mask)
22831 return mask;
22834 /* You only get two chances. */
22835 return NULL_RTX;
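/* Illustrative compositions performed above: integer-vector GE has no
   direct instruction, so "a GE b" becomes (a GT b) IOR (a EQ b);
   "a NE b" becomes the one's complement of (a EQ b); and LT/LTU are
   handled by swapping the operands of GT/GTU.  */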
22838 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22839 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22840 operands for the relation operation COND. */
22843 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22844 rtx cond, rtx cc_op0, rtx cc_op1)
22846 machine_mode dest_mode = GET_MODE (dest);
22847 machine_mode mask_mode = GET_MODE (cc_op0);
22848 enum rtx_code rcode = GET_CODE (cond);
22849 machine_mode cc_mode = CCmode;
22850 rtx mask;
22851 rtx cond2;
22852 bool invert_move = false;
22854 if (VECTOR_UNIT_NONE_P (dest_mode))
22855 return 0;
22857 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22858 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22860 switch (rcode)
22862 /* Swap operands if we can, and fall back to doing the operation as
22863 specified, and doing a NOR to invert the test. */
22864 case NE:
22865 case UNLE:
22866 case UNLT:
22867 case UNGE:
22868 case UNGT:
22869 /* Invert condition and try again.
22870 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22871 invert_move = true;
22872 rcode = reverse_condition_maybe_unordered (rcode);
22873 if (rcode == UNKNOWN)
22874 return 0;
22875 break;
22877 case GE:
22878 case LE:
22879 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22881 /* Invert condition to avoid compound test. */
22882 invert_move = true;
22883 rcode = reverse_condition (rcode);
22885 break;
22887 case GTU:
22888 case GEU:
22889 case LTU:
22890 case LEU:
22891 /* Mark unsigned tests with CCUNSmode. */
22892 cc_mode = CCUNSmode;
22894 /* Invert condition to avoid compound test if necessary. */
22895 if (rcode == GEU || rcode == LEU)
22897 invert_move = true;
22898 rcode = reverse_condition (rcode);
22900 break;
22902 default:
22903 break;
22906 /* Get the vector mask for the given relational operations. */
22907 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22909 if (!mask)
22910 return 0;
22912 if (invert_move)
22913 std::swap (op_true, op_false);
22915 /* Optimize vec1 == vec2, to know the mask generates -1/0. */
22916 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22917 && (GET_CODE (op_true) == CONST_VECTOR
22918 || GET_CODE (op_false) == CONST_VECTOR))
22920 rtx constant_0 = CONST0_RTX (dest_mode);
22921 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22923 if (op_true == constant_m1 && op_false == constant_0)
22925 emit_move_insn (dest, mask);
22926 return 1;
22929 else if (op_true == constant_0 && op_false == constant_m1)
22931 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22932 return 1;
22935 /* If we can't use the vector comparison directly, perhaps we can use
22936 the mask for the true or false fields, instead of loading up a
22937 constant. */
22938 if (op_true == constant_m1)
22939 op_true = mask;
22941 if (op_false == constant_0)
22942 op_false = mask;
22945 if (!REG_P (op_true) && !SUBREG_P (op_true))
22946 op_true = force_reg (dest_mode, op_true);
22948 if (!REG_P (op_false) && !SUBREG_P (op_false))
22949 op_false = force_reg (dest_mode, op_false);
22951 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22952 CONST0_RTX (dest_mode));
22953 emit_insn (gen_rtx_SET (dest,
22954 gen_rtx_IF_THEN_ELSE (dest_mode,
22955 cond2,
22956 op_true,
22957 op_false)));
22958 return 1;
22961 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22962 for SF/DF scalars. Move TRUE_COND to DEST if the comparison OP on its
22963 operands is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22964 hardware has no such operation. */
22966 static int
22967 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22969 enum rtx_code code = GET_CODE (op);
22970 rtx op0 = XEXP (op, 0);
22971 rtx op1 = XEXP (op, 1);
22972 machine_mode compare_mode = GET_MODE (op0);
22973 machine_mode result_mode = GET_MODE (dest);
22974 bool max_p = false;
22976 if (result_mode != compare_mode)
22977 return 0;
22979 if (code == GE || code == GT)
22980 max_p = true;
22981 else if (code == LE || code == LT)
22982 max_p = false;
22983 else
22984 return 0;
22986 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22987 ;
22989 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22990 max_p = !max_p;
22992 else
22993 return 0;
22995 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22996 return 1;
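/* For example (illustrative): "a >= b ? a : b" on DFmode hits the
   first rtx_equal_p test with max_p set and emits a single
   "xsmaxcdp dest,a,b"; "a >= b ? b : a" flips max_p and emits
   xsmincdp instead.  */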
22999 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
23000 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if the
23001 comparison OP on its operands is nonzero/true, FALSE_COND if it is
23002 zero/false. Return 0 if the hardware has no such operation. */
23004 static int
23005 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23007 enum rtx_code code = GET_CODE (op);
23008 rtx op0 = XEXP (op, 0);
23009 rtx op1 = XEXP (op, 1);
23010 machine_mode result_mode = GET_MODE (dest);
23011 rtx compare_rtx;
23012 rtx cmove_rtx;
23013 rtx clobber_rtx;
23015 if (!can_create_pseudo_p ())
23016 return 0;
23018 switch (code)
23020 case EQ:
23021 case GE:
23022 case GT:
23023 break;
23025 case NE:
23026 case LT:
23027 case LE:
23028 code = swap_condition (code);
23029 std::swap (op0, op1);
23030 break;
23032 default:
23033 return 0;
23036 /* Generate: [(parallel [(set (dest)
23037 (if_then_else (op (cmp1) (cmp2))
23038 (true)
23039 (false)))
23040 (clobber (scratch))])]. */
23042 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
23043 cmove_rtx = gen_rtx_SET (dest,
23044 gen_rtx_IF_THEN_ELSE (result_mode,
23045 compare_rtx,
23046 true_cond,
23047 false_cond));
23049 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
23050 emit_insn (gen_rtx_PARALLEL (VOIDmode,
23051 gen_rtvec (2, cmove_rtx, clobber_rtx)));
23053 return 1;
23056 /* Emit a conditional move: move TRUE_COND to DEST if the comparison OP
23057 on its operands is nonzero/true, FALSE_COND if it
23058 is zero/false. Return 0 if the hardware has no such operation. */
23061 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23063 enum rtx_code code = GET_CODE (op);
23064 rtx op0 = XEXP (op, 0);
23065 rtx op1 = XEXP (op, 1);
23066 machine_mode compare_mode = GET_MODE (op0);
23067 machine_mode result_mode = GET_MODE (dest);
23068 rtx temp;
23069 bool is_against_zero;
23071 /* These modes should always match. */
23072 if (GET_MODE (op1) != compare_mode
23073 /* In the isel case however, we can use a compare immediate, so
23074 op1 may be a small constant. */
23075 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
23076 return 0;
23077 if (GET_MODE (true_cond) != result_mode)
23078 return 0;
23079 if (GET_MODE (false_cond) != result_mode)
23080 return 0;
23082 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
23083 if (TARGET_P9_MINMAX
23084 && (compare_mode == SFmode || compare_mode == DFmode)
23085 && (result_mode == SFmode || result_mode == DFmode))
23087 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
23088 return 1;
23090 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
23091 return 1;
23094 /* Don't allow using floating point comparisons for integer results for
23095 now. */
23096 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
23097 return 0;
23099 /* First, work out if the hardware can do this at all, or
23100 if it's too slow.... */
23101 if (!FLOAT_MODE_P (compare_mode))
23103 if (TARGET_ISEL)
23104 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
23105 return 0;
23108 is_against_zero = op1 == CONST0_RTX (compare_mode);
23110 /* A floating-point subtract might overflow, underflow, or produce
23111 an inexact result, thus changing the floating-point flags, so it
23112 can't be generated if we care about that. It's safe if one side
23113 of the construct is zero, since then no subtract will be
23114 generated. */
23115 if (SCALAR_FLOAT_MODE_P (compare_mode)
23116 && flag_trapping_math && ! is_against_zero)
23117 return 0;
23119 /* Eliminate half of the comparisons by switching operands, this
23120 makes the remaining code simpler. */
23121 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
23122 || code == LTGT || code == LT || code == UNLE)
23124 code = reverse_condition_maybe_unordered (code);
23125 temp = true_cond;
23126 true_cond = false_cond;
23127 false_cond = temp;
23130 /* UNEQ and LTGT take four instructions for a comparison with zero,
23131 it'll probably be faster to use a branch here too. */
23132 if (code == UNEQ && HONOR_NANS (compare_mode))
23133 return 0;
23135 /* We're going to try to implement comparisons by performing
23136 a subtract, then comparing against zero. Unfortunately,
23137 Inf - Inf is NaN which is not zero, and so if we don't
23138 know that the operand is finite and the comparison
23139 would treat EQ different to UNORDERED, we can't do it. */
23140 if (HONOR_INFINITIES (compare_mode)
23141 && code != GT && code != UNGE
23142 && (GET_CODE (op1) != CONST_DOUBLE
23143 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23144 /* Constructs of the form (a OP b ? a : b) are safe. */
23145 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23146 || (! rtx_equal_p (op0, true_cond)
23147 && ! rtx_equal_p (op1, true_cond))))
23148 return 0;
23150 /* At this point we know we can use fsel. */
23152 /* Reduce the comparison to a comparison against zero. */
23153 if (! is_against_zero)
23155 temp = gen_reg_rtx (compare_mode);
23156 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23157 op0 = temp;
23158 op1 = CONST0_RTX (compare_mode);
23161 /* If we don't care about NaNs we can reduce some of the comparisons
23162 down to faster ones. */
23163 if (! HONOR_NANS (compare_mode))
23164 switch (code)
23166 case GT:
23167 code = LE;
23168 temp = true_cond;
23169 true_cond = false_cond;
23170 false_cond = temp;
23171 break;
23172 case UNGE:
23173 code = GE;
23174 break;
23175 case UNEQ:
23176 code = EQ;
23177 break;
23178 default:
23179 break;
23182 /* Now, reduce everything down to a GE. */
23183 switch (code)
23185 case GE:
23186 break;
23188 case LE:
23189 temp = gen_reg_rtx (compare_mode);
23190 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23191 op0 = temp;
23192 break;
23194 case ORDERED:
23195 temp = gen_reg_rtx (compare_mode);
23196 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23197 op0 = temp;
23198 break;
23200 case EQ:
23201 temp = gen_reg_rtx (compare_mode);
23202 emit_insn (gen_rtx_SET (temp,
23203 gen_rtx_NEG (compare_mode,
23204 gen_rtx_ABS (compare_mode, op0))));
23205 op0 = temp;
23206 break;
23208 case UNGE:
23209 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23210 temp = gen_reg_rtx (result_mode);
23211 emit_insn (gen_rtx_SET (temp,
23212 gen_rtx_IF_THEN_ELSE (result_mode,
23213 gen_rtx_GE (VOIDmode,
23214 op0, op1),
23215 true_cond, false_cond)));
23216 false_cond = true_cond;
23217 true_cond = temp;
23219 temp = gen_reg_rtx (compare_mode);
23220 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23221 op0 = temp;
23222 break;
23224 case GT:
23225 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23226 temp = gen_reg_rtx (result_mode);
23227 emit_insn (gen_rtx_SET (temp,
23228 gen_rtx_IF_THEN_ELSE (result_mode,
23229 gen_rtx_GE (VOIDmode,
23230 op0, op1),
23231 true_cond, false_cond)));
23232 true_cond = false_cond;
23233 false_cond = temp;
23235 temp = gen_reg_rtx (compare_mode);
23236 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23237 op0 = temp;
23238 break;
23240 default:
23241 gcc_unreachable ();
23244 emit_insn (gen_rtx_SET (dest,
23245 gen_rtx_IF_THEN_ELSE (result_mode,
23246 gen_rtx_GE (VOIDmode,
23247 op0, op1),
23248 true_cond, false_cond)));
23249 return 1;
23252 /* Same as above, but for ints (isel). */
23255 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23257 rtx condition_rtx, cr;
23258 machine_mode mode = GET_MODE (dest);
23259 enum rtx_code cond_code;
23260 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23261 bool signedp;
23263 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23264 return 0;
23266 /* We still have to do the compare, because isel doesn't do a
23267 compare; it just looks at the CRx bits set by a previous compare
23268 instruction. */
23269 condition_rtx = rs6000_generate_compare (op, mode);
23270 cond_code = GET_CODE (condition_rtx);
23271 cr = XEXP (condition_rtx, 0);
23272 signedp = GET_MODE (cr) == CCmode;
23274 isel_func = (mode == SImode
23275 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23276 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23278 switch (cond_code)
23280 case LT: case GT: case LTU: case GTU: case EQ:
23281 /* isel handles these directly. */
23282 break;
23284 default:
23285 /* We need to swap the sense of the comparison. */
23287 std::swap (false_cond, true_cond);
23288 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23290 break;
23293 false_cond = force_reg (mode, false_cond);
23294 if (true_cond != const0_rtx)
23295 true_cond = force_reg (mode, true_cond);
23297 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23299 return 1;
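/* Illustrative isel expansion: "d = (a < b) ? x : y" in SImode becomes
   a cmpw setting a CR field followed by "isel d,x,y,bit", where bit
   selects that field's LT bit; a true_cond of zero can stay const0_rtx
   because isel treats r0 in the RA slot as the value zero.  */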
23302 void
23303 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23305 machine_mode mode = GET_MODE (op0);
23306 enum rtx_code c;
23307 rtx target;
23309 /* VSX/altivec have direct min/max insns. */
23310 if ((code == SMAX || code == SMIN)
23311 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23312 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23314 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23315 return;
23318 if (code == SMAX || code == SMIN)
23319 c = GE;
23320 else
23321 c = GEU;
23323 if (code == SMAX || code == UMAX)
23324 target = emit_conditional_move (dest, c, op0, op1, mode,
23325 op0, op1, mode, 0);
23326 else
23327 target = emit_conditional_move (dest, c, op0, op1, mode,
23328 op1, op0, mode, 0);
23329 gcc_assert (target);
23330 if (target != dest)
23331 emit_move_insn (dest, target);
23334 /* Split a signbit operation on 64-bit machines with direct move. Also allow
23335 for the value to come from memory or if it is already loaded into a GPR. */
23337 void
23338 rs6000_split_signbit (rtx dest, rtx src)
23340 machine_mode d_mode = GET_MODE (dest);
23341 machine_mode s_mode = GET_MODE (src);
23342 rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
23343 rtx shift_reg = dest_di;
23345 gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);
23347 if (MEM_P (src))
23349 rtx mem = (WORDS_BIG_ENDIAN
23350 ? adjust_address (src, DImode, 0)
23351 : adjust_address (src, DImode, 8));
23352 emit_insn (gen_rtx_SET (dest_di, mem));
23355 else
23357 unsigned int r = reg_or_subregno (src);
23359 if (INT_REGNO_P (r))
23360 shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));
23362 else
23364 /* Generate the special mfvsrd instruction to get it in a GPR. */
23365 gcc_assert (VSX_REGNO_P (r));
23366 if (s_mode == KFmode)
23367 emit_insn (gen_signbitkf2_dm2 (dest_di, src));
23368 else
23369 emit_insn (gen_signbittf2_dm2 (dest_di, src));
23373 emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
23374 return;
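/* Illustrative expansions of the above: a KFmode value already in a
   GPR pair just shifts the doubleword holding the sign; one in a VSX
   register is first moved over with the mfvsrd-based signbit pattern;
   in every case the final "srdi dest,dest,63" leaves only the sign
   bit.  */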
23377 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23378 COND is true. Mark the jump as unlikely to be taken. */
23380 static void
23381 emit_unlikely_jump (rtx cond, rtx label)
23383 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23384 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23385 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23388 /* A subroutine of the atomic operation splitters. Emit a load-locked
23389 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23390 the zero_extend operation. */
23392 static void
23393 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23395 rtx (*fn) (rtx, rtx) = NULL;
23397 switch (mode)
23399 case E_QImode:
23400 fn = gen_load_lockedqi;
23401 break;
23402 case E_HImode:
23403 fn = gen_load_lockedhi;
23404 break;
23405 case E_SImode:
23406 if (GET_MODE (mem) == QImode)
23407 fn = gen_load_lockedqi_si;
23408 else if (GET_MODE (mem) == HImode)
23409 fn = gen_load_lockedhi_si;
23410 else
23411 fn = gen_load_lockedsi;
23412 break;
23413 case E_DImode:
23414 fn = gen_load_lockeddi;
23415 break;
23416 case E_TImode:
23417 fn = gen_load_lockedti;
23418 break;
23419 default:
23420 gcc_unreachable ();
23422 emit_insn (fn (reg, mem));
23425 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23426 instruction in MODE. */
23428 static void
23429 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23431 rtx (*fn) (rtx, rtx, rtx) = NULL;
23433 switch (mode)
23435 case E_QImode:
23436 fn = gen_store_conditionalqi;
23437 break;
23438 case E_HImode:
23439 fn = gen_store_conditionalhi;
23440 break;
23441 case E_SImode:
23442 fn = gen_store_conditionalsi;
23443 break;
23444 case E_DImode:
23445 fn = gen_store_conditionaldi;
23446 break;
23447 case E_TImode:
23448 fn = gen_store_conditionalti;
23449 break;
23450 default:
23451 gcc_unreachable ();
23454 /* Emit sync before stwcx. to address PPC405 Erratum. */
23455 if (PPC405_ERRATUM77)
23456 emit_insn (gen_hwsync ());
23458 emit_insn (fn (res, mem, val));
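/* The two helpers above are the heart of the atomic splitters, which
   wrap them in the classic reservation loop, e.g. (illustrative, for a
   word-sized atomic add):

	.L1:	lwarx  9,0,3		load and reserve
		add    9,9,4
		stwcx. 9,0,3		store iff reservation still held
		bne-   0,.L1		lost the reservation, retry  */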
23461 /* Expand barriers before and after a load_locked/store_cond sequence. */
23463 static rtx
23464 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23466 rtx addr = XEXP (mem, 0);
23468 if (!legitimate_indirect_address_p (addr, reload_completed)
23469 && !legitimate_indexed_address_p (addr, reload_completed))
23471 addr = force_reg (Pmode, addr);
23472 mem = replace_equiv_address_nv (mem, addr);
23475 switch (model)
23477 case MEMMODEL_RELAXED:
23478 case MEMMODEL_CONSUME:
23479 case MEMMODEL_ACQUIRE:
23480 break;
23481 case MEMMODEL_RELEASE:
23482 case MEMMODEL_ACQ_REL:
23483 emit_insn (gen_lwsync ());
23484 break;
23485 case MEMMODEL_SEQ_CST:
23486 emit_insn (gen_hwsync ());
23487 break;
23488 default:
23489 gcc_unreachable ();
23491 return mem;
23494 static void
23495 rs6000_post_atomic_barrier (enum memmodel model)
23497 switch (model)
23499 case MEMMODEL_RELAXED:
23500 case MEMMODEL_CONSUME:
23501 case MEMMODEL_RELEASE:
23502 break;
23503 case MEMMODEL_ACQUIRE:
23504 case MEMMODEL_ACQ_REL:
23505 case MEMMODEL_SEQ_CST:
23506 emit_insn (gen_isync ());
23507 break;
23508 default:
23509 gcc_unreachable ();
23513 /* A subroutine of the various atomic expanders. For sub-word operations,
23514 we must adjust things to operate on SImode. Given the original MEM,
23515 return a new aligned memory. Also build and return the quantities by
23516 which to shift and mask. */
23518 static rtx
23519 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23521 rtx addr, align, shift, mask, mem;
23522 HOST_WIDE_INT shift_mask;
23523 machine_mode mode = GET_MODE (orig_mem);
23525 /* For smaller modes, we have to implement this via SImode. */
23526 shift_mask = (mode == QImode ? 0x18 : 0x10);
23528 addr = XEXP (orig_mem, 0);
23529 addr = force_reg (GET_MODE (addr), addr);
23531 /* Aligned memory containing subword. Generate a new memory. We
23532 do not want any of the existing MEM_ATTR data, as we're now
23533 accessing memory outside the original object. */
23534 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23535 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23536 mem = gen_rtx_MEM (SImode, align);
23537 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23538 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23539 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23541 /* Shift amount for subword relative to aligned word. */
23542 shift = gen_reg_rtx (SImode);
23543 addr = gen_lowpart (SImode, addr);
23544 rtx tmp = gen_reg_rtx (SImode);
23545 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23546 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23547 if (BYTES_BIG_ENDIAN)
23548 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23549 shift, 1, OPTAB_LIB_WIDEN);
23550 *pshift = shift;
23552 /* Mask for insertion. */
23553 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23554 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23555 *pmask = mask;
23557 return mem;
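/* Worked example (illustrative): for a QImode access at address ADDR
   on a little-endian target, the code above computes

     mem   = the SImode word at (ADDR & -4)
     shift = (ADDR << 3) & 0x18	i.e. (ADDR % 4) * 8
     mask  = 0xff << shift

   On big-endian targets the shift is XORed with 0x18 so that the
   lowest address corresponds to the most significant byte.  */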
23560 /* A subroutine of the various atomic expanders. For sub-word operands,
23561    combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23563 static rtx
23564 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23566 rtx x;
23568 x = gen_reg_rtx (SImode);
23569 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23570 gen_rtx_NOT (SImode, mask),
23571 oldval)));
23573 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23575 return x;
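/* In effect this computes (OLDVAL & ~MASK) | NEWVAL; callers have
   already shifted NEWVAL into position and zero-extended it so that it
   cannot disturb bits outside MASK.  */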
23578 /* A subroutine of the various atomic expanders. For sub-word operands,
23579 extract WIDE to NARROW via SHIFT. */
23581 static void
23582 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23584 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23585 wide, 1, OPTAB_LIB_WIDEN);
23586 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23589 /* Expand an atomic compare and swap operation. */
23591 void
23592 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23594 rtx boolval, retval, mem, oldval, newval, cond;
23595 rtx label1, label2, x, mask, shift;
23596 machine_mode mode, orig_mode;
23597 enum memmodel mod_s, mod_f;
23598 bool is_weak;
23600 boolval = operands[0];
23601 retval = operands[1];
23602 mem = operands[2];
23603 oldval = operands[3];
23604 newval = operands[4];
23605 is_weak = (INTVAL (operands[5]) != 0);
23606 mod_s = memmodel_base (INTVAL (operands[6]));
23607 mod_f = memmodel_base (INTVAL (operands[7]));
23608 orig_mode = mode = GET_MODE (mem);
23610 mask = shift = NULL_RTX;
23611 if (mode == QImode || mode == HImode)
23613       /* Before power8, we didn't have access to lbarx/lharx, so we generate a
23614 	 lwarx and use shift/mask operations. With power8, we need to do the
23615 comparison in SImode, but the store is still done in QI/HImode. */
23616 oldval = convert_modes (SImode, mode, oldval, 1);
23618 if (!TARGET_SYNC_HI_QI)
23620 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23622 	  /* Shift and mask OLDVAL into position within the word. */
23623 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23624 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23626 /* Shift and mask NEWVAL into position within the word. */
23627 newval = convert_modes (SImode, mode, newval, 1);
23628 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23629 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23632 /* Prepare to adjust the return value. */
23633 retval = gen_reg_rtx (SImode);
23634 mode = SImode;
23636 else if (reg_overlap_mentioned_p (retval, oldval))
23637 oldval = copy_to_reg (oldval);
23639 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23640 oldval = copy_to_mode_reg (mode, oldval);
23642 if (reg_overlap_mentioned_p (retval, newval))
23643 newval = copy_to_reg (newval);
23645 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23647 label1 = NULL_RTX;
23648 if (!is_weak)
23650 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23651 emit_label (XEXP (label1, 0));
23653 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23655 emit_load_locked (mode, retval, mem);
23657 x = retval;
23658 if (mask)
23659 x = expand_simple_binop (SImode, AND, retval, mask,
23660 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23662 cond = gen_reg_rtx (CCmode);
23663 /* If we have TImode, synthesize a comparison. */
23664 if (mode != TImode)
23665 x = gen_rtx_COMPARE (CCmode, x, oldval);
23666 else
23668 rtx xor1_result = gen_reg_rtx (DImode);
23669 rtx xor2_result = gen_reg_rtx (DImode);
23670 rtx or_result = gen_reg_rtx (DImode);
23671 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23672 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23673 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23674 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23676 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23677 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23678 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23679 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23682 emit_insn (gen_rtx_SET (cond, x));
23684 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23685 emit_unlikely_jump (x, label2);
23687 x = newval;
23688 if (mask)
23689 x = rs6000_mask_atomic_subword (retval, newval, mask);
23691 emit_store_conditional (orig_mode, cond, mem, x);
23693 if (!is_weak)
23695 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23696 emit_unlikely_jump (x, label1);
23699 if (!is_mm_relaxed (mod_f))
23700 emit_label (XEXP (label2, 0));
23702 rs6000_post_atomic_barrier (mod_s);
23704 if (is_mm_relaxed (mod_f))
23705 emit_label (XEXP (label2, 0));
23707 if (shift)
23708 rs6000_finish_atomic_subword (operands[1], retval, shift);
23709 else if (mode != GET_MODE (operands[1]))
23710 convert_move (operands[1], retval, 1);
23712 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23713 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23714 emit_insn (gen_rtx_SET (boolval, x));
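/* Illustrative sketch (approximate assembly) of what the expansion
   above yields for a strong SImode compare-and-swap with seq_cst
   semantics, writing RET, OLD and NEW for the operands:

	hwsync
   .L1:	lwarx  RET,0,MEM
	cmpw   cr0,RET,OLD
	bne-   cr0,.L2
	stwcx. NEW,0,MEM
	bne-   cr0,.L1
   .L2:	isync
	# BOOL = (cr0 is EQ); CR0 holds EQ on success, NE on failure.
*/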
23717 /* Expand an atomic exchange operation. */
23719 void
23720 rs6000_expand_atomic_exchange (rtx operands[])
23722 rtx retval, mem, val, cond;
23723 machine_mode mode;
23724 enum memmodel model;
23725 rtx label, x, mask, shift;
23727 retval = operands[0];
23728 mem = operands[1];
23729 val = operands[2];
23730 model = memmodel_base (INTVAL (operands[3]));
23731 mode = GET_MODE (mem);
23733 mask = shift = NULL_RTX;
23734 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23736 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23738       /* Shift and mask VAL into position within the word. */
23739 val = convert_modes (SImode, mode, val, 1);
23740 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23741 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23743 /* Prepare to adjust the return value. */
23744 retval = gen_reg_rtx (SImode);
23745 mode = SImode;
23748 mem = rs6000_pre_atomic_barrier (mem, model);
23750 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23751 emit_label (XEXP (label, 0));
23753 emit_load_locked (mode, retval, mem);
23755 x = val;
23756 if (mask)
23757 x = rs6000_mask_atomic_subword (retval, val, mask);
23759 cond = gen_reg_rtx (CCmode);
23760 emit_store_conditional (mode, cond, mem, x);
23762 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23763 emit_unlikely_jump (x, label);
23765 rs6000_post_atomic_barrier (model);
23767 if (shift)
23768 rs6000_finish_atomic_subword (operands[0], retval, shift);
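/* The loop generated here is the plain exchange idiom; for SImode with
   a relaxed memory model it is roughly:

   .L1:	lwarx  RET,0,MEM
	stwcx. VAL,0,MEM
	bne-   cr0,.L1
*/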
23771 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23772 to perform. MEM is the memory on which to operate. VAL is the second
23773 operand of the binary operator. BEFORE and AFTER are optional locations to
23774    return the value of MEM either before or after the operation. MODEL_RTX
23775 is a CONST_INT containing the memory model to use. */
23777 void
23778 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23779 rtx orig_before, rtx orig_after, rtx model_rtx)
23781 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23782 machine_mode mode = GET_MODE (mem);
23783 machine_mode store_mode = mode;
23784 rtx label, x, cond, mask, shift;
23785 rtx before = orig_before, after = orig_after;
23787 mask = shift = NULL_RTX;
23788   /* On power8, we want to use SImode for the operation. On earlier
23789      systems, do the operation on a full word and use shift/mask to extract
23790      the proper byte or halfword. */
23791 if (mode == QImode || mode == HImode)
23793 if (TARGET_SYNC_HI_QI)
23795 val = convert_modes (SImode, mode, val, 1);
23797 /* Prepare to adjust the return value. */
23798 before = gen_reg_rtx (SImode);
23799 if (after)
23800 after = gen_reg_rtx (SImode);
23801 mode = SImode;
23803 else
23805 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23808 	  /* Shift and mask VAL into position within the word. */
23808 val = convert_modes (SImode, mode, val, 1);
23809 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23810 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23812 switch (code)
23814 case IOR:
23815 case XOR:
23816 /* We've already zero-extended VAL. That is sufficient to
23817 make certain that it does not affect other bits. */
23818 mask = NULL;
23819 break;
23821 case AND:
23822 /* If we make certain that all of the other bits in VAL are
23823 set, that will be sufficient to not affect other bits. */
23824 x = gen_rtx_NOT (SImode, mask);
23825 x = gen_rtx_IOR (SImode, x, val);
23826 emit_insn (gen_rtx_SET (val, x));
23827 mask = NULL;
23828 break;
23830 case NOT:
23831 case PLUS:
23832 case MINUS:
23833 /* These will all affect bits outside the field and need
23834 adjustment via MASK within the loop. */
23835 break;
23837 default:
23838 gcc_unreachable ();
23841 /* Prepare to adjust the return value. */
23842 before = gen_reg_rtx (SImode);
23843 if (after)
23844 after = gen_reg_rtx (SImode);
23845 store_mode = mode = SImode;
23849 mem = rs6000_pre_atomic_barrier (mem, model);
23851 label = gen_label_rtx ();
23852 emit_label (label);
23853 label = gen_rtx_LABEL_REF (VOIDmode, label);
23855 if (before == NULL_RTX)
23856 before = gen_reg_rtx (mode);
23858 emit_load_locked (mode, before, mem);
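  /* Note that CODE == NOT computes AFTER = ~(BEFORE & VAL), i.e. a
     NAND; this is how the atomic fetch-nand expansions reach this
     function.  */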
23860 if (code == NOT)
23862 x = expand_simple_binop (mode, AND, before, val,
23863 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23864 after = expand_simple_unop (mode, NOT, x, after, 1);
23866 else
23868 after = expand_simple_binop (mode, code, before, val,
23869 after, 1, OPTAB_LIB_WIDEN);
23872 x = after;
23873 if (mask)
23875 x = expand_simple_binop (SImode, AND, after, mask,
23876 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23877 x = rs6000_mask_atomic_subword (before, x, mask);
23879 else if (store_mode != mode)
23880 x = convert_modes (store_mode, mode, x, 1);
23882 cond = gen_reg_rtx (CCmode);
23883 emit_store_conditional (store_mode, cond, mem, x);
23885 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23886 emit_unlikely_jump (x, label);
23888 rs6000_post_atomic_barrier (model);
23890 if (shift)
23892 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23893 	 then do the calculations in a SImode register. */
23894 if (orig_before)
23895 rs6000_finish_atomic_subword (orig_before, before, shift);
23896 if (orig_after)
23897 rs6000_finish_atomic_subword (orig_after, after, shift);
23899 else if (store_mode != mode)
23901 /* QImode/HImode on machines with lbarx/lharx where we do the native
23902 	 operation and then do the calculations in a SImode register. */
23903 if (orig_before)
23904 convert_move (orig_before, before, 1);
23905 if (orig_after)
23906 convert_move (orig_after, after, 1);
23908 else if (orig_after && after != orig_after)
23909 emit_move_insn (orig_after, after);
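/* Illustrative example of the whole expansion: an SImode
   __atomic_fetch_add with seq_cst ordering becomes roughly

	hwsync
   .L1:	lwarx  BEFORE,0,MEM
	add    AFTER,BEFORE,VAL
	stwcx. AFTER,0,MEM
	bne-   cr0,.L1
	isync
*/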
23912 /* Emit instructions to move SRC to DST. Called by splitters for
23913 multi-register moves. It will emit at most one instruction for
23914 each register that is accessed; that is, it won't emit li/lis pairs
23915 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23916 register. */
23918 void
23919 rs6000_split_multireg_move (rtx dst, rtx src)
23921 /* The register number of the first register being moved. */
23922 int reg;
23923 /* The mode that is to be moved. */
23924 machine_mode mode;
23925 /* The mode that the move is being done in, and its size. */
23926 machine_mode reg_mode;
23927 int reg_mode_size;
23928 /* The number of registers that will be moved. */
23929 int nregs;
23931 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23932 mode = GET_MODE (dst);
23933 nregs = hard_regno_nregs (reg, mode);
23934 if (FP_REGNO_P (reg))
23935 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23936 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
23937 else if (ALTIVEC_REGNO_P (reg))
23938 reg_mode = V16QImode;
23939 else
23940 reg_mode = word_mode;
23941 reg_mode_size = GET_MODE_SIZE (reg_mode);
23943 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23945 /* TDmode residing in FP registers is special, since the ISA requires that
23946 the lower-numbered word of a register pair is always the most significant
23947 word, even in little-endian mode. This does not match the usual subreg
23948    semantics, so we cannot use simplify_gen_subreg in those cases. Access
23949 the appropriate constituent registers "by hand" in little-endian mode.
23951 Note we do not need to check for destructive overlap here since TDmode
23952 can only reside in even/odd register pairs. */
23953 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23955 rtx p_src, p_dst;
23956 int i;
23958 for (i = 0; i < nregs; i++)
23960 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23961 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23962 else
23963 p_src = simplify_gen_subreg (reg_mode, src, mode,
23964 i * reg_mode_size);
23966 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23967 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23968 else
23969 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23970 i * reg_mode_size);
23972 emit_insn (gen_rtx_SET (p_dst, p_src));
23975 return;
23978 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23980 /* Move register range backwards, if we might have destructive
23981 overlap. */
23982 int i;
23983 for (i = nregs - 1; i >= 0; i--)
23984 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23985 i * reg_mode_size),
23986 simplify_gen_subreg (reg_mode, src, mode,
23987 i * reg_mode_size)));
23989 else
23991 int i;
23992 int j = -1;
23993 bool used_update = false;
23994 rtx restore_basereg = NULL_RTX;
23996 if (MEM_P (src) && INT_REGNO_P (reg))
23998 rtx breg;
24000 if (GET_CODE (XEXP (src, 0)) == PRE_INC
24001 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
24003 rtx delta_rtx;
24004 breg = XEXP (XEXP (src, 0), 0);
24005 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
24006 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
24007 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
24008 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
24009 src = replace_equiv_address (src, breg);
24011 else if (! rs6000_offsettable_memref_p (src, reg_mode))
24013 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
24015 rtx basereg = XEXP (XEXP (src, 0), 0);
24016 if (TARGET_UPDATE)
24018 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
24019 emit_insn (gen_rtx_SET (ndst,
24020 gen_rtx_MEM (reg_mode,
24021 XEXP (src, 0))));
24022 used_update = true;
24024 else
24025 emit_insn (gen_rtx_SET (basereg,
24026 XEXP (XEXP (src, 0), 1)));
24027 src = replace_equiv_address (src, basereg);
24029 else
24031 rtx basereg = gen_rtx_REG (Pmode, reg);
24032 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
24033 src = replace_equiv_address (src, basereg);
24037 breg = XEXP (src, 0);
24038 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
24039 breg = XEXP (breg, 0);
24041 /* If the base register we are using to address memory is
24042 also a destination reg, then change that register last. */
24043 if (REG_P (breg)
24044 && REGNO (breg) >= REGNO (dst)
24045 && REGNO (breg) < REGNO (dst) + nregs)
24046 j = REGNO (breg) - REGNO (dst);
24048 else if (MEM_P (dst) && INT_REGNO_P (reg))
24050 rtx breg;
24052 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
24053 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
24055 rtx delta_rtx;
24056 breg = XEXP (XEXP (dst, 0), 0);
24057 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
24058 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
24059 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
24061 /* We have to update the breg before doing the store.
24062 Use store with update, if available. */
24064 if (TARGET_UPDATE)
24066 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24067 emit_insn (TARGET_32BIT
24068 ? (TARGET_POWERPC64
24069 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
24070 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
24071 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
24072 used_update = true;
24074 else
24075 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
24076 dst = replace_equiv_address (dst, breg);
24078 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
24079 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
24081 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
24083 rtx basereg = XEXP (XEXP (dst, 0), 0);
24084 if (TARGET_UPDATE)
24086 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24087 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
24088 XEXP (dst, 0)),
24089 nsrc));
24090 used_update = true;
24092 else
24093 emit_insn (gen_rtx_SET (basereg,
24094 XEXP (XEXP (dst, 0), 1)));
24095 dst = replace_equiv_address (dst, basereg);
24097 else
24099 rtx basereg = XEXP (XEXP (dst, 0), 0);
24100 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
24101 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
24102 && REG_P (basereg)
24103 && REG_P (offsetreg)
24104 && REGNO (basereg) != REGNO (offsetreg));
24105 if (REGNO (basereg) == 0)
24107 rtx tmp = offsetreg;
24108 offsetreg = basereg;
24109 basereg = tmp;
24111 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
24112 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
24113 dst = replace_equiv_address (dst, basereg);
24116 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
24117 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
24120 for (i = 0; i < nregs; i++)
24122 /* Calculate index to next subword. */
24123 ++j;
24124 if (j == nregs)
24125 j = 0;
24127 /* If compiler already emitted move of first word by
24128 store with update, no need to do anything. */
24129 if (j == 0 && used_update)
24130 continue;
24132 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24133 j * reg_mode_size),
24134 simplify_gen_subreg (reg_mode, src, mode,
24135 j * reg_mode_size)));
24137 if (restore_basereg != NULL_RTX)
24138 emit_insn (restore_basereg);
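/* Example (illustrative): a TImode register-to-register move in 64-bit
   mode splits into two DImode moves.  If the ranges overlap, say
   moving r3:r4 into r4:r5, the backwards loop above moves r4 into r5
   before r3 into r4, so that r4 is read before it is overwritten.  */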
24143 /* This page contains routines that are used to determine what the
24144 function prologue and epilogue code will do and write them out. */
24146 /* Determine whether REG is really used. */
24148 static bool
24149 save_reg_p (int reg)
24151 /* We need to mark the PIC offset register live for the same conditions
24152 as it is set up, or otherwise it won't be saved before we clobber it. */
24154 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24156 /* When calling eh_return, we must return true for all the cases
24157 where conditional_register_usage marks the PIC offset reg
24158 call used. */
24159 if (TARGET_TOC && TARGET_MINIMAL_TOC
24160 && (crtl->calls_eh_return
24161 || df_regs_ever_live_p (reg)
24162 || !constant_pool_empty_p ()))
24163 return true;
24165 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
24166 && flag_pic)
24167 return true;
24170 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
24173 /* Return the first fixed-point register that is required to be
24174 saved. 32 if none. */
24176 int
24177 first_reg_to_save (void)
24179 int first_reg;
24181 /* Find lowest numbered live register. */
24182 for (first_reg = 13; first_reg <= 31; first_reg++)
24183 if (save_reg_p (first_reg))
24184 break;
24186 #if TARGET_MACHO
24187 if (flag_pic
24188 && crtl->uses_pic_offset_table
24189 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
24190 return RS6000_PIC_OFFSET_TABLE_REGNUM;
24191 #endif
24193 return first_reg;
24196 /* Similar, for FP regs. */
24198 int
24199 first_fp_reg_to_save (void)
24201 int first_reg;
24203 /* Find lowest numbered live register. */
24204 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24205 if (save_reg_p (first_reg))
24206 break;
24208 return first_reg;
24211 /* Similar, for AltiVec regs. */
24213 static int
24214 first_altivec_reg_to_save (void)
24216 int i;
24218 /* Stack frame remains as is unless we are in AltiVec ABI. */
24219 if (! TARGET_ALTIVEC_ABI)
24220 return LAST_ALTIVEC_REGNO + 1;
24222 /* On Darwin, the unwind routines are compiled without
24223 TARGET_ALTIVEC, and use save_world to save/restore the
24224 altivec registers when necessary. */
24225 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24226 && ! TARGET_ALTIVEC)
24227 return FIRST_ALTIVEC_REGNO + 20;
24229 /* Find lowest numbered live register. */
24230 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24231 if (save_reg_p (i))
24232 break;
24234 return i;
24237 /* Return a 32-bit mask of the AltiVec registers we need to set in
24238 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
24239 the 32-bit word is 0. */
24241 static unsigned int
24242 compute_vrsave_mask (void)
24244 unsigned int i, mask = 0;
24246 /* On Darwin, the unwind routines are compiled without
24247 TARGET_ALTIVEC, and use save_world to save/restore the
24248 call-saved altivec registers when necessary. */
24249 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24250 && ! TARGET_ALTIVEC)
24251 mask |= 0xFFF;
24253 /* First, find out if we use _any_ altivec registers. */
24254 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24255 if (df_regs_ever_live_p (i))
24256 mask |= ALTIVEC_REG_BIT (i);
24258 if (mask == 0)
24259 return mask;
24261 /* Next, remove the argument registers from the set. These must
24262 be in the VRSAVE mask set by the caller, so we don't need to add
24263 them in again. More importantly, the mask we compute here is
24264 used to generate CLOBBERs in the set_vrsave insn, and we do not
24265 wish the argument registers to die. */
24266 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24267 mask &= ~ALTIVEC_REG_BIT (i);
24269 /* Similarly, remove the return value from the set. */
24271 bool yes = false;
24272 diddle_return_value (is_altivec_return_reg, &yes);
24273 if (yes)
24274 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24277 return mask;
24280 /* For a very restricted set of circumstances, we can cut down the
24281 size of prologues/epilogues by calling our own save/restore-the-world
24282 routines. */
24284 static void
24285 compute_save_world_info (rs6000_stack_t *info)
24287 info->world_save_p = 1;
24288 info->world_save_p
24289 = (WORLD_SAVE_P (info)
24290 && DEFAULT_ABI == ABI_DARWIN
24291 && !cfun->has_nonlocal_label
24292 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24293 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24294 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24295 && info->cr_save_p);
24297 /* This will not work in conjunction with sibcalls. Make sure there
24298 are none. (This check is expensive, but seldom executed.) */
24299 if (WORLD_SAVE_P (info))
24301 rtx_insn *insn;
24302 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24303 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24305 info->world_save_p = 0;
24306 break;
24310 if (WORLD_SAVE_P (info))
24312 /* Even if we're not touching VRsave, make sure there's room on the
24313 stack for it, if it looks like we're calling SAVE_WORLD, which
24314 will attempt to save it. */
24315 info->vrsave_size = 4;
24317 /* If we are going to save the world, we need to save the link register too. */
24318 info->lr_save_p = 1;
24320 /* "Save" the VRsave register too if we're saving the world. */
24321 if (info->vrsave_mask == 0)
24322 info->vrsave_mask = compute_vrsave_mask ();
24324 /* Because the Darwin register save/restore routines only handle
24325 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24326 check. */
24327 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24328 && (info->first_altivec_reg_save
24329 >= FIRST_SAVED_ALTIVEC_REGNO));
24332 return;
24336 static void
24337 is_altivec_return_reg (rtx reg, void *xyes)
24339 bool *yes = (bool *) xyes;
24340 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24341 *yes = true;
24345 /* Return whether REG is a global user reg or has been specified by
24346 -ffixed-REG. We should not restore these, and so cannot use
24347 lmw or out-of-line restore functions if there are any. We also
24348 can't save them (well, emit frame notes for them), because frame
24349 unwinding during exception handling will restore saved registers. */
24351 static bool
24352 fixed_reg_p (int reg)
24354 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24355 backend sets it, overriding anything the user might have given. */
24356 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24357 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24358 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24359 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24360 return false;
24362 return fixed_regs[reg];
24365 /* Determine the strategy for saving/restoring registers. */
24367 enum {
24368 SAVE_MULTIPLE = 0x1,
24369 SAVE_INLINE_GPRS = 0x2,
24370 SAVE_INLINE_FPRS = 0x4,
24371 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24372 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24373 SAVE_INLINE_VRS = 0x20,
24374 REST_MULTIPLE = 0x100,
24375 REST_INLINE_GPRS = 0x200,
24376 REST_INLINE_FPRS = 0x400,
24377 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24378 REST_INLINE_VRS = 0x1000
24381 static int
24382 rs6000_savres_strategy (rs6000_stack_t *info,
24383 bool using_static_chain_p)
24385 int strategy = 0;
24387 /* Select between in-line and out-of-line save and restore of regs.
24388 First, all the obvious cases where we don't use out-of-line. */
24389 if (crtl->calls_eh_return
24390 || cfun->machine->ra_need_lr)
24391 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24392 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24393 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24395 if (info->first_gp_reg_save == 32)
24396 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24398 if (info->first_fp_reg_save == 64
24399 /* The out-of-line FP routines use double-precision stores;
24400 we can't use those routines if we don't have such stores. */
24401 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
24402 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24404 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24405 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24407 /* Define cutoff for using out-of-line functions to save registers. */
24408 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24410 if (!optimize_size)
24412 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24413 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24414 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24416 else
24418 /* Prefer out-of-line restore if it will exit. */
24419 if (info->first_fp_reg_save > 61)
24420 strategy |= SAVE_INLINE_FPRS;
24421 if (info->first_gp_reg_save > 29)
24423 if (info->first_fp_reg_save == 64)
24424 strategy |= SAVE_INLINE_GPRS;
24425 else
24426 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24428 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24429 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24432 else if (DEFAULT_ABI == ABI_DARWIN)
24434 if (info->first_fp_reg_save > 60)
24435 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24436 if (info->first_gp_reg_save > 29)
24437 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24438 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24440 else
24442 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24443 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24444 || info->first_fp_reg_save > 61)
24445 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24446 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24447 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24450 /* Don't bother to try to save things out-of-line if r11 is occupied
24451 by the static chain. It would require too much fiddling and the
24452      static chain is rarely used anyway. FPRs are saved w.r.t. the stack
24453 pointer on Darwin, and AIX uses r1 or r12. */
24454 if (using_static_chain_p
24455 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24456 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24457 | SAVE_INLINE_GPRS
24458 | SAVE_INLINE_VRS);
24460 /* Don't ever restore fixed regs. That means we can't use the
24461 out-of-line register restore functions if a fixed reg is in the
24462 range of regs restored. */
24463 if (!(strategy & REST_INLINE_FPRS))
24464 for (int i = info->first_fp_reg_save; i < 64; i++)
24465 if (fixed_regs[i])
24467 strategy |= REST_INLINE_FPRS;
24468 break;
24471 /* We can only use the out-of-line routines to restore fprs if we've
24472 saved all the registers from first_fp_reg_save in the prologue.
24473 Otherwise, we risk loading garbage. Of course, if we have saved
24474 out-of-line then we know we haven't skipped any fprs. */
24475 if ((strategy & SAVE_INLINE_FPRS)
24476 && !(strategy & REST_INLINE_FPRS))
24477 for (int i = info->first_fp_reg_save; i < 64; i++)
24478 if (!save_reg_p (i))
24480 strategy |= REST_INLINE_FPRS;
24481 break;
24484 /* Similarly, for altivec regs. */
24485 if (!(strategy & REST_INLINE_VRS))
24486 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24487 if (fixed_regs[i])
24489 strategy |= REST_INLINE_VRS;
24490 break;
24493 if ((strategy & SAVE_INLINE_VRS)
24494 && !(strategy & REST_INLINE_VRS))
24495 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24496 if (!save_reg_p (i))
24498 strategy |= REST_INLINE_VRS;
24499 break;
24502 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24503 saved is an out-of-line save or restore. Set up the value for
24504 the next test (excluding out-of-line gprs). */
24505 bool lr_save_p = (info->lr_save_p
24506 || !(strategy & SAVE_INLINE_FPRS)
24507 || !(strategy & SAVE_INLINE_VRS)
24508 || !(strategy & REST_INLINE_FPRS)
24509 || !(strategy & REST_INLINE_VRS));
24511 if (TARGET_MULTIPLE
24512 && !TARGET_POWERPC64
24513 && info->first_gp_reg_save < 31
24514 && !(flag_shrink_wrap
24515 && flag_shrink_wrap_separate
24516 && optimize_function_for_speed_p (cfun)))
24518 int count = 0;
24519 for (int i = info->first_gp_reg_save; i < 32; i++)
24520 if (save_reg_p (i))
24521 count++;
24523 if (count <= 1)
24524 /* Don't use store multiple if only one reg needs to be
24525 saved. This can occur for example when the ABI_V4 pic reg
24526 (r30) needs to be saved to make calls, but r31 is not
24527 used. */
24528 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24529 else
24531 /* Prefer store multiple for saves over out-of-line
24532 routines, since the store-multiple instruction will
24533 always be smaller. */
24534 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24536 /* The situation is more complicated with load multiple.
24537 We'd prefer to use the out-of-line routines for restores,
24538 since the "exit" out-of-line routines can handle the
24539 	 restore of LR and the frame teardown. However it doesn't
24540 make sense to use the out-of-line routine if that is the
24541 only reason we'd need to save LR, and we can't use the
24542 "exit" out-of-line gpr restore if we have saved some
24543 	 fprs; in those cases it is advantageous to use load
24544 multiple when available. */
24545 if (info->first_fp_reg_save != 64 || !lr_save_p)
24546 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24550 /* Using the "exit" out-of-line routine does not improve code size
24551 if using it would require lr to be saved and if only saving one
24552 or two gprs. */
24553 else if (!lr_save_p && info->first_gp_reg_save > 29)
24554 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24556 /* Don't ever restore fixed regs. */
24557 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24558 for (int i = info->first_gp_reg_save; i < 32; i++)
24559 if (fixed_reg_p (i))
24561 strategy |= REST_INLINE_GPRS;
24562 strategy &= ~REST_MULTIPLE;
24563 break;
24566 /* We can only use load multiple or the out-of-line routines to
24567 restore gprs if we've saved all the registers from
24568 first_gp_reg_save. Otherwise, we risk loading garbage.
24569 Of course, if we have saved out-of-line or used stmw then we know
24570 we haven't skipped any gprs. */
24571 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24572 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24573 for (int i = info->first_gp_reg_save; i < 32; i++)
24574 if (!save_reg_p (i))
24576 strategy |= REST_INLINE_GPRS;
24577 strategy &= ~REST_MULTIPLE;
24578 break;
24581 if (TARGET_ELF && TARGET_64BIT)
24583 if (!(strategy & SAVE_INLINE_FPRS))
24584 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24585 else if (!(strategy & SAVE_INLINE_GPRS)
24586 && info->first_fp_reg_save == 64)
24587 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24589 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24590 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24592 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24593 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24595 return strategy;
24598 /* Calculate the stack information for the current function. This is
24599 complicated by having two separate calling sequences, the AIX calling
24600 sequence and the V.4 calling sequence.
24602 AIX (and Darwin/Mac OS X) stack frames look like:
24603 32-bit 64-bit
24604 SP----> +---------------------------------------+
24605 | back chain to caller | 0 0
24606 +---------------------------------------+
24607 | saved CR | 4 8 (8-11)
24608 +---------------------------------------+
24609 | saved LR | 8 16
24610 +---------------------------------------+
24611 | reserved for compilers | 12 24
24612 +---------------------------------------+
24613 | reserved for binders | 16 32
24614 +---------------------------------------+
24615 | saved TOC pointer | 20 40
24616 +---------------------------------------+
24617 | Parameter save area (+padding*) (P) | 24 48
24618 +---------------------------------------+
24619 | Alloca space (A) | 24+P etc.
24620 +---------------------------------------+
24621 | Local variable space (L) | 24+P+A
24622 +---------------------------------------+
24623 | Float/int conversion temporary (X) | 24+P+A+L
24624 +---------------------------------------+
24625 | Save area for AltiVec registers (W) | 24+P+A+L+X
24626 +---------------------------------------+
24627 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24628 +---------------------------------------+
24629 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24630 +---------------------------------------+
24631 	| Save area for GP registers (G)	| 24+P+A+L+X+W+Y+Z
24632 +---------------------------------------+
24633 	| Save area for FP registers (F)	| 24+P+A+L+X+W+Y+Z+G
24634 +---------------------------------------+
24635 old SP->| back chain to caller's caller |
24636 +---------------------------------------+
24638 * If the alloca area is present, the parameter save area is
24639 padded so that the former starts 16-byte aligned.
24641 The required alignment for AIX configurations is two words (i.e., 8
24642 or 16 bytes).
24644 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24646 SP----> +---------------------------------------+
24647 | Back chain to caller | 0
24648 +---------------------------------------+
24649 | Save area for CR | 8
24650 +---------------------------------------+
24651 | Saved LR | 16
24652 +---------------------------------------+
24653 | Saved TOC pointer | 24
24654 +---------------------------------------+
24655 | Parameter save area (+padding*) (P) | 32
24656 +---------------------------------------+
24657 | Alloca space (A) | 32+P
24658 +---------------------------------------+
24659 | Local variable space (L) | 32+P+A
24660 +---------------------------------------+
24661 | Save area for AltiVec registers (W) | 32+P+A+L
24662 +---------------------------------------+
24663 | AltiVec alignment padding (Y) | 32+P+A+L+W
24664 +---------------------------------------+
24665 | Save area for GP registers (G) | 32+P+A+L+W+Y
24666 +---------------------------------------+
24667 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24668 +---------------------------------------+
24669 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24670 +---------------------------------------+
24672 * If the alloca area is present, the parameter save area is
24673 padded so that the former starts 16-byte aligned.
24675 V.4 stack frames look like:
24677 SP----> +---------------------------------------+
24678 | back chain to caller | 0
24679 +---------------------------------------+
24680 | caller's saved LR | 4
24681 +---------------------------------------+
24682 | Parameter save area (+padding*) (P) | 8
24683 +---------------------------------------+
24684 | Alloca space (A) | 8+P
24685 +---------------------------------------+
24686 | Varargs save area (V) | 8+P+A
24687 +---------------------------------------+
24688 | Local variable space (L) | 8+P+A+V
24689 +---------------------------------------+
24690 | Float/int conversion temporary (X) | 8+P+A+V+L
24691 +---------------------------------------+
24692 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24693 +---------------------------------------+
24694 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24695 +---------------------------------------+
24696 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24697 +---------------------------------------+
24698 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24699 +---------------------------------------+
24700 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24701 +---------------------------------------+
24702 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24703 +---------------------------------------+
24704 old SP->| back chain to caller's caller |
24705 +---------------------------------------+
24707 * If the alloca area is present and the required alignment is
24708 16 bytes, the parameter save area is padded so that the
24709 alloca area starts 16-byte aligned.
24711 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24712 given. (But note below and in sysv4.h that we require only 8 and
24713    may round up the size of our stack frame anyway. The historical
24714 reason is early versions of powerpc-linux which didn't properly
24715 align the stack at program startup. A happy side-effect is that
24716 -mno-eabi libraries can be used with -meabi programs.)
24718 The EABI configuration defaults to the V.4 layout. However,
24719 the stack alignment requirements may differ. If -mno-eabi is not
24720 given, the required stack alignment is 8 bytes; if -mno-eabi is
24721 given, the required alignment is 16 bytes. (But see V.4 comment
24722 above.) */
24724 #ifndef ABI_STACK_BOUNDARY
24725 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24726 #endif
24728 static rs6000_stack_t *
24729 rs6000_stack_info (void)
24731 /* We should never be called for thunks, we are not set up for that. */
24732 gcc_assert (!cfun->is_thunk);
24734 rs6000_stack_t *info = &stack_info;
24735 int reg_size = TARGET_32BIT ? 4 : 8;
24736 int ehrd_size;
24737 int ehcr_size;
24738 int save_align;
24739 int first_gp;
24740 HOST_WIDE_INT non_fixed_size;
24741 bool using_static_chain_p;
24743 if (reload_completed && info->reload_completed)
24744 return info;
24746 memset (info, 0, sizeof (*info));
24747 info->reload_completed = reload_completed;
24749 /* Select which calling sequence. */
24750 info->abi = DEFAULT_ABI;
24752 /* Calculate which registers need to be saved & save area size. */
24753 info->first_gp_reg_save = first_reg_to_save ();
24754 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24755 even if it currently looks like we won't. Reload may need it to
24756 get at a constant; if so, it will have already created a constant
24757 pool entry for it. */
24758 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24759 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24760 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24761 && crtl->uses_const_pool
24762 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24763 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24764 else
24765 first_gp = info->first_gp_reg_save;
24767 info->gp_size = reg_size * (32 - first_gp);
24769 info->first_fp_reg_save = first_fp_reg_to_save ();
24770 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24772 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24773 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24774 - info->first_altivec_reg_save);
24776 /* Does this function call anything? */
24777 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24779 /* Determine if we need to save the condition code registers. */
24780 if (save_reg_p (CR2_REGNO)
24781 || save_reg_p (CR3_REGNO)
24782 || save_reg_p (CR4_REGNO))
24784 info->cr_save_p = 1;
24785 if (DEFAULT_ABI == ABI_V4)
24786 info->cr_size = reg_size;
24789 /* If the current function calls __builtin_eh_return, then we need
24790 to allocate stack space for registers that will hold data for
24791 the exception handler. */
24792 if (crtl->calls_eh_return)
24794 unsigned int i;
24795 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24796 continue;
24798 ehrd_size = i * UNITS_PER_WORD;
24800 else
24801 ehrd_size = 0;
24803 /* In the ELFv2 ABI, we also need to allocate space for separate
24804 CR field save areas if the function calls __builtin_eh_return. */
24805 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24807 /* This hard-codes that we have three call-saved CR fields. */
24808 ehcr_size = 3 * reg_size;
24809 /* We do *not* use the regular CR save mechanism. */
24810 info->cr_save_p = 0;
24812 else
24813 ehcr_size = 0;
24815 /* Determine various sizes. */
24816 info->reg_size = reg_size;
24817 info->fixed_size = RS6000_SAVE_AREA;
24818 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24819 if (cfun->calls_alloca)
24820 info->parm_size =
24821 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24822 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24823 else
24824 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24825 TARGET_ALTIVEC ? 16 : 8);
24826 if (FRAME_GROWS_DOWNWARD)
24827 info->vars_size
24828 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24829 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24830 - (info->fixed_size + info->vars_size + info->parm_size);
24832 if (TARGET_ALTIVEC_ABI)
24833 info->vrsave_mask = compute_vrsave_mask ();
24835 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24836 info->vrsave_size = 4;
24838 compute_save_world_info (info);
24840 /* Calculate the offsets. */
24841 switch (DEFAULT_ABI)
24843 case ABI_NONE:
24844 default:
24845 gcc_unreachable ();
24847 case ABI_AIX:
24848 case ABI_ELFv2:
24849 case ABI_DARWIN:
24850 info->fp_save_offset = -info->fp_size;
24851 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24853 if (TARGET_ALTIVEC_ABI)
24855 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24857 /* Align stack so vector save area is on a quadword boundary.
24858 The padding goes above the vectors. */
24859 if (info->altivec_size != 0)
24860 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24862 info->altivec_save_offset = info->vrsave_save_offset
24863 - info->altivec_padding_size
24864 - info->altivec_size;
24865 gcc_assert (info->altivec_size == 0
24866 || info->altivec_save_offset % 16 == 0);
24868 /* Adjust for AltiVec case. */
24869 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24871 else
24872 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24874 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24875 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24876 info->lr_save_offset = 2*reg_size;
24877 break;
24879 case ABI_V4:
24880 info->fp_save_offset = -info->fp_size;
24881 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24882 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24884 if (TARGET_ALTIVEC_ABI)
24886 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24888 /* Align stack so vector save area is on a quadword boundary. */
24889 if (info->altivec_size != 0)
24890 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24892 info->altivec_save_offset = info->vrsave_save_offset
24893 - info->altivec_padding_size
24894 - info->altivec_size;
24896 /* Adjust for AltiVec case. */
24897 info->ehrd_offset = info->altivec_save_offset;
24899 else
24900 info->ehrd_offset = info->cr_save_offset;
24902 info->ehrd_offset -= ehrd_size;
24903 info->lr_save_offset = reg_size;
24906 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24907 info->save_size = RS6000_ALIGN (info->fp_size
24908 + info->gp_size
24909 + info->altivec_size
24910 + info->altivec_padding_size
24911 + ehrd_size
24912 + ehcr_size
24913 + info->cr_size
24914 + info->vrsave_size,
24915 save_align);
24917 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24919 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24920 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24922 /* Determine if we need to save the link register. */
24923 if (info->calls_p
24924 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24925 && crtl->profile
24926 && !TARGET_PROFILE_KERNEL)
24927 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24928 #ifdef TARGET_RELOCATABLE
24929 || (DEFAULT_ABI == ABI_V4
24930 && (TARGET_RELOCATABLE || flag_pic > 1)
24931 && !constant_pool_empty_p ())
24932 #endif
24933 || rs6000_ra_ever_killed ())
24934 info->lr_save_p = 1;
24936 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24937 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24938 && call_used_regs[STATIC_CHAIN_REGNUM]);
24939 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24941 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24942 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24943 || !(info->savres_strategy & SAVE_INLINE_VRS)
24944 || !(info->savres_strategy & REST_INLINE_GPRS)
24945 || !(info->savres_strategy & REST_INLINE_FPRS)
24946 || !(info->savres_strategy & REST_INLINE_VRS))
24947 info->lr_save_p = 1;
24949 if (info->lr_save_p)
24950 df_set_regs_ever_live (LR_REGNO, true);
24952 /* Determine if we need to allocate any stack frame:
24954 For AIX we need to push the stack if a frame pointer is needed
24955 (because the stack might be dynamically adjusted), if we are
24956 debugging, if we make calls, or if the sum of fp_save, gp_save,
24957      and local variables is more than the space needed to save all
24958 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24959 + 18*8 = 288 (GPR13 reserved).
24961 For V.4 we don't have the stack cushion that AIX uses, but assume
24962 that the debugger can handle stackless frames. */
24964 if (info->calls_p)
24965 info->push_p = 1;
24967 else if (DEFAULT_ABI == ABI_V4)
24968 info->push_p = non_fixed_size != 0;
24970 else if (frame_pointer_needed)
24971 info->push_p = 1;
24973 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24974 info->push_p = 1;
24976 else
24977 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24979 return info;
24982 static void
24983 debug_stack_info (rs6000_stack_t *info)
24985 const char *abi_string;
24987 if (! info)
24988 info = rs6000_stack_info ();
24990 fprintf (stderr, "\nStack information for function %s:\n",
24991 ((current_function_decl && DECL_NAME (current_function_decl))
24992 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24993 : "<unknown>"));
24995 switch (info->abi)
24997 default: abi_string = "Unknown"; break;
24998 case ABI_NONE: abi_string = "NONE"; break;
24999 case ABI_AIX: abi_string = "AIX"; break;
25000 case ABI_ELFv2: abi_string = "ELFv2"; break;
25001 case ABI_DARWIN: abi_string = "Darwin"; break;
25002 case ABI_V4: abi_string = "V.4"; break;
25005 fprintf (stderr, "\tABI = %5s\n", abi_string);
25007 if (TARGET_ALTIVEC_ABI)
25008 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
25010 if (info->first_gp_reg_save != 32)
25011 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
25013 if (info->first_fp_reg_save != 64)
25014 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
25016 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
25017 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
25018 info->first_altivec_reg_save);
25020 if (info->lr_save_p)
25021 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
25023 if (info->cr_save_p)
25024 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
25026 if (info->vrsave_mask)
25027 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
25029 if (info->push_p)
25030 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
25032 if (info->calls_p)
25033 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
25035 if (info->gp_size)
25036 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
25038 if (info->fp_size)
25039 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
25041 if (info->altivec_size)
25042 fprintf (stderr, "\taltivec_save_offset = %5d\n",
25043 info->altivec_save_offset);
25045 if (info->vrsave_size)
25046 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
25047 info->vrsave_save_offset);
25049 if (info->lr_save_p)
25050 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
25052 if (info->cr_save_p)
25053 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
25055 if (info->varargs_save_offset)
25056 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
25058 if (info->total_size)
25059 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25060 info->total_size);
25062 if (info->vars_size)
25063 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25064 info->vars_size);
25066 if (info->parm_size)
25067 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
25069 if (info->fixed_size)
25070 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
25072 if (info->gp_size)
25073 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
25075 if (info->fp_size)
25076 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
25078 if (info->altivec_size)
25079 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
25081 if (info->vrsave_size)
25082 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
25084 if (info->altivec_padding_size)
25085 fprintf (stderr, "\taltivec_padding_size= %5d\n",
25086 info->altivec_padding_size);
25088 if (info->cr_size)
25089 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
25091 if (info->save_size)
25092 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
25094 if (info->reg_size != 4)
25095 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
25097 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
25099 fprintf (stderr, "\n");
25102 rtx
25103 rs6000_return_addr (int count, rtx frame)
25105 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
25106 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
25107 if (count != 0
25108 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
25110 cfun->machine->ra_needs_full_frame = 1;
25112 if (count == 0)
25113 /* FRAME is set to frame_pointer_rtx by the generic code, but that
25114 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
25115 frame = stack_pointer_rtx;
25116 rtx prev_frame_addr = memory_address (Pmode, frame);
25117 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
25118 rtx lr_save_off = plus_constant (Pmode,
25119 prev_frame, RETURN_ADDRESS_OFFSET);
25120 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
25121 return gen_rtx_MEM (Pmode, lr_save_addr);
25124 cfun->machine->ra_need_lr = 1;
25125 return get_hard_reg_initial_val (Pmode, LR_REGNO);
25128 /* Say whether a function is a candidate for sibcall handling or not. */
25130 static bool
25131 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25133 tree fntype;
25135 if (decl)
25136 fntype = TREE_TYPE (decl);
25137 else
25138 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25140 /* We can't do it if the called function has more vector parameters
25141 than the current function; there's nowhere to put the VRsave code. */
25142 if (TARGET_ALTIVEC_ABI
25143 && TARGET_ALTIVEC_VRSAVE
25144 && !(decl && decl == current_function_decl))
25146 function_args_iterator args_iter;
25147 tree type;
25148 int nvreg = 0;
25150 /* Functions with vector parameters are required to have a
25151 prototype, so the argument type info must be available
25152 here. */
25153 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
25154 if (TREE_CODE (type) == VECTOR_TYPE
25155 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25156 nvreg++;
25158 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
25159 if (TREE_CODE (type) == VECTOR_TYPE
25160 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25161 nvreg--;
25163 if (nvreg > 0)
25164 return false;
25167 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25168    functions, because the callee may have a different TOC pointer from
25169    the caller's, and there's no way to ensure we restore the TOC when
25170 we return. With the secure-plt SYSV ABI we can't make non-local
25171 calls when -fpic/PIC because the plt call stubs use r30. */
25172 if (DEFAULT_ABI == ABI_DARWIN
25173 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25174 && decl
25175 && !DECL_EXTERNAL (decl)
25176 && !DECL_WEAK (decl)
25177 && (*targetm.binds_local_p) (decl))
25178 || (DEFAULT_ABI == ABI_V4
25179 && (!TARGET_SECURE_PLT
25180 || !flag_pic
25181 || (decl
25182 && (*targetm.binds_local_p) (decl)))))
25184 tree attr_list = TYPE_ATTRIBUTES (fntype);
25186 if (!lookup_attribute ("longcall", attr_list)
25187 || lookup_attribute ("shortcall", attr_list))
25188 return true;
25191 return false;
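/* For example (a sketch of the effect, not code from this file), under
   ELFv2 a call such as

     static int callee (int);
     int caller (int x) { return callee (x); }

   binds locally and so may be compiled as a sibling call (a plain
   "b callee"), whereas a call to an external function is rejected
   above because the callee may need a different TOC pointer.  */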
25194 static int
25195 rs6000_ra_ever_killed (void)
25197 rtx_insn *top;
25198 rtx reg;
25199 rtx_insn *insn;
25201 if (cfun->is_thunk)
25202 return 0;
25204 if (cfun->machine->lr_save_state)
25205 return cfun->machine->lr_save_state - 1;
25207 /* regs_ever_live has LR marked as used if any sibcalls are present,
25208 but this should not force saving and restoring in the
25209 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25210 clobbers LR, so that is inappropriate. */
25212 /* Also, the prologue can generate a store into LR that
25213 doesn't really count, like this:
25215 move LR->R0
25216 bcl to set PIC register
25217 move LR->R31
25218 move R0->LR
25220 When we're called from the epilogue, we need to avoid counting
25221 this as a store. */
25223 push_topmost_sequence ();
25224 top = get_insns ();
25225 pop_topmost_sequence ();
25226 reg = gen_rtx_REG (Pmode, LR_REGNO);
25228 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25230 if (INSN_P (insn))
25232 if (CALL_P (insn))
25234 if (!SIBLING_CALL_P (insn))
25235 return 1;
25237 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25238 return 1;
25239 else if (set_of (reg, insn) != NULL_RTX
25240 && !prologue_epilogue_contains (insn))
25241 return 1;
25244 return 0;
25247 /* Emit instructions needed to load the TOC register.
25248 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
25249 a constant pool; or for SVR4 -fpic. */
25251 void
25252 rs6000_emit_load_toc_table (int fromprolog)
25254 rtx dest;
25255 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25257 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25259 char buf[30];
25260 rtx lab, tmp1, tmp2, got;
25262 lab = gen_label_rtx ();
25263 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25264 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25265 if (flag_pic == 2)
25267 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25268 need_toc_init = 1;
25270 else
25271 got = rs6000_got_sym ();
25272 tmp1 = tmp2 = dest;
25273 if (!fromprolog)
25275 tmp1 = gen_reg_rtx (Pmode);
25276 tmp2 = gen_reg_rtx (Pmode);
25278 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25279 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25280 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25281 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25283 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25285 emit_insn (gen_load_toc_v4_pic_si ());
25286 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25288 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25290 char buf[30];
25291 rtx temp0 = (fromprolog
25292 ? gen_rtx_REG (Pmode, 0)
25293 : gen_reg_rtx (Pmode));
25295 if (fromprolog)
25297 rtx symF, symL;
25299 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25300 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25302 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25303 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25305 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25306 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25307 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25309 else
25311 rtx tocsym, lab;
25313 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25314 need_toc_init = 1;
25315 lab = gen_label_rtx ();
25316 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25317 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25318 if (TARGET_LINK_STACK)
25319 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25320 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25322 emit_insn (gen_addsi3 (dest, temp0, dest));
25324 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25326 /* This is for AIX code running in non-PIC ELF32. */
25327 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25329 need_toc_init = 1;
25330 emit_insn (gen_elf_high (dest, realsym));
25331 emit_insn (gen_elf_low (dest, dest, realsym));
25333 else
25335 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25337 if (TARGET_32BIT)
25338 emit_insn (gen_load_toc_aix_si (dest));
25339 else
25340 emit_insn (gen_load_toc_aix_di (dest));
25344 /* Emit instructions to restore the link register after determining where
25345 its value has been stored. */
25347 void
25348 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25350 rs6000_stack_t *info = rs6000_stack_info ();
25351 rtx operands[2];
25353 operands[0] = source;
25354 operands[1] = scratch;
25356 if (info->lr_save_p)
25358 rtx frame_rtx = stack_pointer_rtx;
25359 HOST_WIDE_INT sp_offset = 0;
25360 rtx tmp;
25362 if (frame_pointer_needed
25363 || cfun->calls_alloca
25364 || info->total_size > 32767)
25366 tmp = gen_frame_mem (Pmode, frame_rtx);
25367 emit_move_insn (operands[1], tmp);
25368 frame_rtx = operands[1];
25370 else if (info->push_p)
25371 sp_offset = info->total_size;
25373 tmp = plus_constant (Pmode, frame_rtx,
25374 info->lr_save_offset + sp_offset);
25375 tmp = gen_frame_mem (Pmode, tmp);
25376 emit_move_insn (tmp, operands[0]);
25378 else
25379 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25381 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25382 state of lr_save_p so any change from here on would be a bug. In
25383 particular, stop rs6000_ra_ever_killed from considering the SET
25384 of lr we may have added just above. */
25385 cfun->machine->lr_save_state = info->lr_save_p + 1;
25388 static GTY(()) alias_set_type set = -1;
25390 alias_set_type
25391 get_TOC_alias_set (void)
25393 if (set == -1)
25394 set = new_alias_set ();
25395 return set;
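/* A minimal sketch, not GCC code, of the lazy-initialization idiom used
   by get_TOC_alias_set above: the value is created on first use and then
   cached in a static variable for every later call.  */
#if 0
static int cached_id = -1;

static int
get_cached_id (int (*make_id) (void))
{
  if (cached_id == -1)
    cached_id = make_id ();	/* created exactly once */
  return cached_id;		/* reused afterwards */
}
#endif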
25398 /* This returns nonzero if the current function uses the TOC. This is
25399 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25400 is generated by the ABI_V4 load_toc_* patterns.
25401 Return 2 instead of 1 if the load_toc_* pattern is in the function
25402 partition that doesn't start the function. */
25403 #if TARGET_ELF
25404 static int
25405 uses_TOC (void)
25407 rtx_insn *insn;
25408 int ret = 1;
25410 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25412 if (INSN_P (insn))
25414 rtx pat = PATTERN (insn);
25415 int i;
25417 if (GET_CODE (pat) == PARALLEL)
25418 for (i = 0; i < XVECLEN (pat, 0); i++)
25420 rtx sub = XVECEXP (pat, 0, i);
25421 if (GET_CODE (sub) == USE)
25423 sub = XEXP (sub, 0);
25424 if (GET_CODE (sub) == UNSPEC
25425 && XINT (sub, 1) == UNSPEC_TOC)
25426 return ret;
25430 else if (crtl->has_bb_partition
25431 && NOTE_P (insn)
25432 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25433 ret = 2;
25435 return 0;
25437 #endif
25439 rtx
25440 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25442 rtx tocrel, tocreg, hi;
25444 if (TARGET_DEBUG_ADDR)
25446 if (GET_CODE (symbol) == SYMBOL_REF)
25447 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25448 XSTR (symbol, 0));
25449 else
25451 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25452 GET_RTX_NAME (GET_CODE (symbol)));
25453 debug_rtx (symbol);
25457 if (!can_create_pseudo_p ())
25458 df_set_regs_ever_live (TOC_REGISTER, true);
25460 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25461 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25462 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25463 return tocrel;
25465 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25466 if (largetoc_reg != NULL)
25468 emit_move_insn (largetoc_reg, hi);
25469 hi = largetoc_reg;
25471 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25474 /* Issue assembly directives that create a reference to the given DWARF
25475 FRAME_TABLE_LABEL from the current function section. */
25476 void
25477 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25479 fprintf (asm_out_file, "\t.ref %s\n",
25480 (* targetm.strip_name_encoding) (frame_table_label));
25483 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25484 and the change to the stack pointer. */
25486 static void
25487 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25489 rtvec p;
25490 int i;
25491 rtx regs[3];
25493 i = 0;
25494 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25495 if (hard_frame_needed)
25496 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25497 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25498 || (hard_frame_needed
25499 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25500 regs[i++] = fp;
25502 p = rtvec_alloc (i);
25503 while (--i >= 0)
25505 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25506 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25509 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25512 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25513 and set the appropriate attributes for the generated insn. Return the
25514 first insn which adjusts the stack pointer or the last insn before
25515 the stack adjustment loop.
25517 SIZE_INT is used to create the CFI note for the allocation.
25519 SIZE_RTX is an rtx containing the size of the adjustment. Note that
25520 since stacks grow toward lower addresses, its runtime value is -SIZE_INT.
25522 ORIG_SP contains the backchain value that must be stored at *sp. */
25524 static rtx_insn *
25525 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25527 rtx_insn *insn;
25529 rtx size_rtx = GEN_INT (-size_int);
25530 if (size_int > 32767)
25532 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25533 /* Need a note here so that try_split doesn't get confused. */
25534 if (get_last_insn () == NULL_RTX)
25535 emit_note (NOTE_INSN_DELETED);
25536 insn = emit_move_insn (tmp_reg, size_rtx);
25537 try_split (PATTERN (insn), insn, 0);
25538 size_rtx = tmp_reg;
25541 if (Pmode == SImode)
25542 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25543 stack_pointer_rtx,
25544 size_rtx,
25545 orig_sp));
25546 else
25547 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
25548 stack_pointer_rtx,
25549 size_rtx,
25550 orig_sp));
25551 rtx par = PATTERN (insn);
25552 gcc_assert (GET_CODE (par) == PARALLEL);
25553 rtx set = XVECEXP (par, 0, 0);
25554 gcc_assert (GET_CODE (set) == SET);
25555 rtx mem = SET_DEST (set);
25556 gcc_assert (MEM_P (mem));
25557 MEM_NOTRAP_P (mem) = 1;
25558 set_mem_alias_set (mem, get_frame_alias_set ());
25560 RTX_FRAME_RELATED_P (insn) = 1;
25561 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25562 gen_rtx_SET (stack_pointer_rtx,
25563 gen_rtx_PLUS (Pmode,
25564 stack_pointer_rtx,
25565 GEN_INT (-size_int))));
25567 /* Emit a blockage to ensure the allocation/probing insns are
25568 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25569 note for similar reasons. */
25570 if (flag_stack_clash_protection)
25572 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25573 emit_insn (gen_blockage ());
25576 return insn;
25579 static HOST_WIDE_INT
25580 get_stack_clash_protection_probe_interval (void)
25582 return (HOST_WIDE_INT_1U
25583 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25586 static HOST_WIDE_INT
25587 get_stack_clash_protection_guard_size (void)
25589 return (HOST_WIDE_INT_1U
25590 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
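/* Illustrative arithmetic only; the parameter values below are assumed,
   not necessarily the GCC defaults.  Both routines above turn a log2
   parameter into a byte count.  */
#if 0
unsigned long long probe_interval = 1ULL << 12;	/* probe every 4 KiB */
unsigned long long guard_size     = 1ULL << 16;	/* 64 KiB guard region */
#endif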
25593 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25594 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25596 COPY_REG, if non-null, should contain a copy of the original
25597 stack pointer at exit from this function.
25599 This is subtly different than the Ada probing in that it tries hard to
25600 prevent attacks that jump the stack guard. Thus it is never allowed to
25601 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25602 space without a suitable probe. */
25603 static rtx_insn *
25604 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25605 rtx copy_reg)
25607 rtx orig_sp = copy_reg;
25609 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25611 /* Round the size down to a multiple of PROBE_INTERVAL. */
25612 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25614 /* If explicitly requested,
25615 or the rounded size is not the same as the original size
25616 or the rounded size is greater than a page,
25617 then we will need a copy of the original stack pointer. */
25618 if (rounded_size != orig_size
25619 || rounded_size > probe_interval
25620 || copy_reg)
25622 /* If the caller did not request a copy of the incoming stack
25623 pointer, then we use r0 to hold the copy. */
25624 if (!copy_reg)
25625 orig_sp = gen_rtx_REG (Pmode, 0);
25626 emit_move_insn (orig_sp, stack_pointer_rtx);
25629 /* There are three cases here.
25631 One is a single probe, which is the most common and the most efficiently
25632 implemented, as it does not need a copy of the original
25633 stack pointer if there are no residuals.
25635 Second is unrolled allocation/probes, which we use if there are just
25636 a few of them. It needs to save the original stack pointer into a
25637 temporary for use as a source register in the allocation/probe.
25639 Last is a loop. This is the most uncommon case and the least efficient. */
25640 rtx_insn *retval = NULL;
25641 if (rounded_size == probe_interval)
25643 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25645 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25647 else if (rounded_size <= 8 * probe_interval)
25649 /* The ABI requires using the store with update insns to allocate
25650 space and store the backchain into the stack.
25652 So we save the current stack pointer into a temporary, then
25653 emit the store-with-update insns to store the saved stack pointer
25654 into the right location in each new page. */
25655 for (int i = 0; i < rounded_size; i += probe_interval)
25657 rtx_insn *insn
25658 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25660 /* Save the first stack adjustment in RETVAL. */
25661 if (i == 0)
25662 retval = insn;
25665 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25667 else
25669 /* Compute the ending address. */
25670 rtx end_addr
25671 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25672 rtx rs = GEN_INT (-rounded_size);
25673 rtx_insn *insn;
25674 if (add_operand (rs, Pmode))
25675 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25676 else
25678 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25679 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25680 stack_pointer_rtx));
25681 /* Describe the effect of INSN to the CFI engine. */
25682 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25683 gen_rtx_SET (end_addr,
25684 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25685 rs)));
25687 RTX_FRAME_RELATED_P (insn) = 1;
25689 /* Emit the loop. */
25690 if (TARGET_64BIT)
25691 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25692 stack_pointer_rtx, orig_sp,
25693 end_addr));
25694 else
25695 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25696 stack_pointer_rtx, orig_sp,
25697 end_addr));
25698 RTX_FRAME_RELATED_P (retval) = 1;
25699 /* Describe the effect of INSN to the CFI engine. */
25700 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25701 gen_rtx_SET (stack_pointer_rtx, end_addr));
25703 /* Emit a blockage to ensure the allocation/probing insns are
25704 not optimized, combined, removed, etc. Other cases handle this
25705 within their call to rs6000_emit_allocate_stack_1. */
25706 emit_insn (gen_blockage ());
25708 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25711 if (orig_size != rounded_size)
25713 /* Allocate (and implicitly probe) any residual space. */
25714 HOST_WIDE_INT residual = orig_size - rounded_size;
25716 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25718 /* If the residual was the only allocation, then we can return the
25719 allocating insn. */
25720 if (!retval)
25721 retval = insn;
25724 return retval;
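/* A condensed sketch, not GCC code, of how the routine above selects one
   of its three cases from the rounded allocation size.  */
#if 0
enum probe_strategy { PROBE_SINGLE, PROBE_UNROLLED, PROBE_LOOP };

static enum probe_strategy
choose_probe_strategy (long long rounded_size, long long probe_interval)
{
  if (rounded_size == probe_interval)
    return PROBE_SINGLE;		/* one allocation doubles as the probe */
  if (rounded_size <= 8 * probe_interval)
    return PROBE_UNROLLED;		/* a few unrolled allocation/probes */
  return PROBE_LOOP;			/* emit the probe loop */
}
#endif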
25727 /* Emit the correct code for allocating stack space, as insns.
25728 If COPY_REG, make sure a copy of the old frame is left there.
25729 The generated code may use hard register 0 as a temporary. */
25731 static rtx_insn *
25732 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25734 rtx_insn *insn;
25735 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25736 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25737 rtx todec = gen_int_mode (-size, Pmode);
25739 if (INTVAL (todec) != -size)
25741 warning (0, "stack frame too large");
25742 emit_insn (gen_trap ());
25743 return 0;
25746 if (crtl->limit_stack)
25748 if (REG_P (stack_limit_rtx)
25749 && REGNO (stack_limit_rtx) > 1
25750 && REGNO (stack_limit_rtx) <= 31)
25752 rtx_insn *insn
25753 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25754 gcc_assert (insn);
25755 emit_insn (insn);
25756 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25758 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25759 && TARGET_32BIT
25760 && DEFAULT_ABI == ABI_V4
25761 && !flag_pic)
25763 rtx toload = gen_rtx_CONST (VOIDmode,
25764 gen_rtx_PLUS (Pmode,
25765 stack_limit_rtx,
25766 GEN_INT (size)));
25768 emit_insn (gen_elf_high (tmp_reg, toload));
25769 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25770 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25771 const0_rtx));
25773 else
25774 warning (0, "stack limit expression is not supported");
25777 if (flag_stack_clash_protection)
25779 if (size < get_stack_clash_protection_guard_size ())
25780 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25781 else
25783 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25784 copy_reg);
25786 /* If we asked for a copy with an offset, then we still need to add in
25787 the offset. */
25788 if (copy_reg && copy_off)
25789 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25790 return insn;
25794 if (copy_reg)
25796 if (copy_off != 0)
25797 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25798 else
25799 emit_move_insn (copy_reg, stack_reg);
25802 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25803 it now and set the alias set/attributes. The gen_*_update
25804 calls made by rs6000_emit_allocate_stack_1 will generate a PARALLEL
25805 with the MEM set being the first operation. */
25806 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25807 return insn;
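/* Conceptually (a sketch, not the emitted RTL), the -fstack-limit check
   above computes LIMIT + SIZE into a scratch register and traps, via an
   unsigned comparison, if the stack pointer is already below that.  */
#if 0
static void
check_stack_limit (unsigned long sp, unsigned long stack_limit,
		   unsigned long size)
{
  if (sp < stack_limit + size)
    __builtin_trap ();	/* the new frame would cross the stack limit */
}
#endif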
25810 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25812 #if PROBE_INTERVAL > 32768
25813 #error Cannot use indexed addressing mode for stack probing
25814 #endif
25816 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25817 inclusive. These are offsets from the current stack pointer. */
25819 static void
25820 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25822 /* See if we have a constant small number of probes to generate. If so,
25823 that's the easy case. */
25824 if (first + size <= 32768)
25826 HOST_WIDE_INT i;
25828 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25829 it exceeds SIZE. If only one probe is needed, this will not
25830 generate any code. Then probe at FIRST + SIZE. */
25831 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25832 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25833 -(first + i)));
25835 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25836 -(first + size)));
25839 /* Otherwise, do the same as above, but in a loop. Note that we must be
25840 extra careful with variables wrapping around because we might be at
25841 the very top (or the very bottom) of the address space and we have
25842 to be able to handle this case properly; in particular, we use an
25843 equality test for the loop condition. */
25844 else
25846 HOST_WIDE_INT rounded_size;
25847 rtx r12 = gen_rtx_REG (Pmode, 12);
25848 rtx r0 = gen_rtx_REG (Pmode, 0);
25850 /* Sanity check for the addressing mode we're going to use. */
25851 gcc_assert (first <= 32768);
25853 /* Step 1: round SIZE to the previous multiple of the interval. */
25855 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25858 /* Step 2: compute initial and final value of the loop counter. */
25860 /* TEST_ADDR = SP + FIRST. */
25861 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25862 -first)));
25864 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25865 if (rounded_size > 32768)
25867 emit_move_insn (r0, GEN_INT (-rounded_size));
25868 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25870 else
25871 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25872 -rounded_size)));
25875 /* Step 3: the loop
25877 do
25878 {
25879 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25880 probe at TEST_ADDR
25881 }
25882 while (TEST_ADDR != LAST_ADDR)
25884 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25885 until it is equal to ROUNDED_SIZE. */
25887 if (TARGET_64BIT)
25888 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25889 else
25890 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25893 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25894 that SIZE is equal to ROUNDED_SIZE. */
25896 if (size != rounded_size)
25897 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
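/* A worked example with illustrative values: FIRST = 16384, SIZE = 12288
   and PROBE_INTERVAL = 4096 take the small-constant path above (since
   FIRST + SIZE = 28672 <= 32768), probing the following offsets below
   the incoming stack pointer.  */
#if 0
probe (sp - 20480);	/* FIRST + 1 * PROBE_INTERVAL */
probe (sp - 24576);	/* FIRST + 2 * PROBE_INTERVAL */
probe (sp - 28672);	/* FIRST + SIZE, the final probe */
#endif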
25901 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25902 addresses, not offsets. */
25904 static const char *
25905 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25907 static int labelno = 0;
25908 char loop_lab[32];
25909 rtx xops[2];
25911 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25913 /* Loop. */
25914 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25916 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25917 xops[0] = reg1;
25918 xops[1] = GEN_INT (-PROBE_INTERVAL);
25919 output_asm_insn ("addi %0,%0,%1", xops);
25921 /* Probe at TEST_ADDR. */
25922 xops[1] = gen_rtx_REG (Pmode, 0);
25923 output_asm_insn ("stw %1,0(%0)", xops);
25925 /* Test if TEST_ADDR == LAST_ADDR. */
25926 xops[1] = reg2;
25927 if (TARGET_64BIT)
25928 output_asm_insn ("cmpd 0,%0,%1", xops);
25929 else
25930 output_asm_insn ("cmpw 0,%0,%1", xops);
25932 /* Branch. */
25933 fputs ("\tbne 0,", asm_out_file);
25934 assemble_name_raw (asm_out_file, loop_lab);
25935 fputc ('\n', asm_out_file);
25937 return "";
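/* For illustration (assuming reg1 = r12 and reg2 = r0 as passed by
   rs6000_emit_probe_stack_range above, PROBE_INTERVAL = 4096, and ELF
   label syntax), the 32-bit loop printed by this routine resembles:

	.LPSRL0:
	addi 12,12,-4096	# TEST_ADDR -= PROBE_INTERVAL
	stw 0,0(12)		# probe the new address
	cmpw 0,12,0		# reached LAST_ADDR?
	bne 0,.LPSRL0  */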
25940 /* This function is called when rs6000_frame_related is processing
25941 SETs within a PARALLEL, and returns whether the REGNO save ought to
25942 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25943 for out-of-line register save functions, store multiple, and the
25944 Darwin world_save. They may contain registers that don't really
25945 need saving. */
25947 static bool
25948 interesting_frame_related_regno (unsigned int regno)
25950 /* Saves apparently of r0 are actually saving LR. It doesn't make
25951 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25952 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25953 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25954 as frame related. */
25955 if (regno == 0)
25956 return true;
25957 /* If we see CR2 then we are here on a Darwin world save. Saves of
25958 CR2 signify the whole CR is being saved. This is a long-standing
25959 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25960 that CR needs to be saved. */
25961 if (regno == CR2_REGNO)
25962 return true;
25963 /* Omit frame info for any user-defined global regs. If frame info
25964 is supplied for them, frame unwinding will restore a user reg.
25965 Also omit frame info for any reg we don't need to save, as that
25966 bloats frame info and can cause problems with shrink wrapping.
25967 Since global regs won't be seen as needing to be saved, both of
25968 these conditions are covered by save_reg_p. */
25969 return save_reg_p (regno);
25972 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25973 addresses, not offsets.
25975 REG2 contains the backchain that must be stored into *sp at each allocation.
25977 This is subtly different than the Ada probing above in that it tries hard
25978 to prevent attacks that jump the stack guard. Thus, it is never allowed
25979 to allocate more than PROBE_INTERVAL bytes of stack space without a
25980 suitable probe. */
25982 static const char *
25983 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25985 static int labelno = 0;
25986 char loop_lab[32];
25987 rtx xops[3];
25989 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25991 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25993 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25995 /* This allocates and probes. */
25996 xops[0] = reg1;
25997 xops[1] = reg2;
25998 xops[2] = GEN_INT (-probe_interval);
25999 if (TARGET_64BIT)
26000 output_asm_insn ("stdu %1,%2(%0)", xops);
26001 else
26002 output_asm_insn ("stwu %1,%2(%0)", xops);
26004 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
26005 xops[0] = reg1;
26006 xops[1] = reg3;
26007 if (TARGET_64BIT)
26008 output_asm_insn ("cmpd 0,%0,%1", xops);
26009 else
26010 output_asm_insn ("cmpw 0,%0,%1", xops);
26012 fputs ("\tbne 0,", asm_out_file);
26013 assemble_name_raw (asm_out_file, loop_lab);
26014 fputc ('\n', asm_out_file);
26016 return "";
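/* Likewise for illustration (assuming reg1 = r1 (sp), reg2 = r0 holding
   the backchain, reg3 = r12 holding the end address, and a 4096-byte
   probe interval), the 64-bit loop printed above resembles:

	.LPSRL1:
	stdu 0,-4096(1)		# allocate a page, storing the backchain
	cmpd 0,1,12		# reached the end address?
	bne 0,.LPSRL1  */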
26019 /* Wrapper around the output_probe_stack_range routines. */
26020 const char *
26021 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
26023 if (flag_stack_clash_protection)
26024 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
26025 else
26026 return output_probe_stack_range_1 (reg1, reg3);
26029 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
26030 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
26031 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
26032 deduce these equivalences by itself so it wasn't necessary to hold
26033 its hand so much. Don't be tempted to always supply d2_f_d_e with
26034 the actual cfa register, i.e. r31 when we are using a hard frame
26035 pointer. That fails when saving regs off r1, and sched moves the
26036 r31 setup past the reg saves. */
26038 static rtx_insn *
26039 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
26040 rtx reg2, rtx repl2)
26042 rtx repl;
26044 if (REGNO (reg) == STACK_POINTER_REGNUM)
26046 gcc_checking_assert (val == 0);
26047 repl = NULL_RTX;
26049 else
26050 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
26051 GEN_INT (val));
26053 rtx pat = PATTERN (insn);
26054 if (!repl && !reg2)
26056 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
26057 if (GET_CODE (pat) == PARALLEL)
26058 for (int i = 0; i < XVECLEN (pat, 0); i++)
26059 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26061 rtx set = XVECEXP (pat, 0, i);
26063 if (!REG_P (SET_SRC (set))
26064 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26065 RTX_FRAME_RELATED_P (set) = 1;
26067 RTX_FRAME_RELATED_P (insn) = 1;
26068 return insn;
26071 /* We expect that 'pat' is either a SET or a PARALLEL containing
26072 SETs (and possibly other stuff). In a PARALLEL, all the SETs
26073 are important so they all have to be marked RTX_FRAME_RELATED_P.
26074 Call simplify_replace_rtx on the SETs rather than the whole insn
26075 so as to leave the other stuff alone (for example USE of r12). */
26077 set_used_flags (pat);
26078 if (GET_CODE (pat) == SET)
26080 if (repl)
26081 pat = simplify_replace_rtx (pat, reg, repl);
26082 if (reg2)
26083 pat = simplify_replace_rtx (pat, reg2, repl2);
26085 else if (GET_CODE (pat) == PARALLEL)
26087 pat = shallow_copy_rtx (pat);
26088 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
26090 for (int i = 0; i < XVECLEN (pat, 0); i++)
26091 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26093 rtx set = XVECEXP (pat, 0, i);
26095 if (repl)
26096 set = simplify_replace_rtx (set, reg, repl);
26097 if (reg2)
26098 set = simplify_replace_rtx (set, reg2, repl2);
26099 XVECEXP (pat, 0, i) = set;
26101 if (!REG_P (SET_SRC (set))
26102 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26103 RTX_FRAME_RELATED_P (set) = 1;
26106 else
26107 gcc_unreachable ();
26109 RTX_FRAME_RELATED_P (insn) = 1;
26110 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
26112 return insn;
26115 /* Returns an insn that has a vrsave set operation with the
26116 appropriate CLOBBERs. */
26118 static rtx
26119 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
26121 int nclobs, i;
26122 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
26123 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26125 clobs[0]
26126 = gen_rtx_SET (vrsave,
26127 gen_rtx_UNSPEC_VOLATILE (SImode,
26128 gen_rtvec (2, reg, vrsave),
26129 UNSPECV_SET_VRSAVE));
26131 nclobs = 1;
26133 /* We need to clobber the registers in the mask so the scheduler
26134 does not move sets to VRSAVE before sets of AltiVec registers.
26136 However, if the function receives nonlocal gotos, reload will set
26137 all call saved registers live. We will end up with:
26139 (set (reg 999) (mem))
26140 (parallel [ (set (reg vrsave) (unspec blah))
26141 (clobber (reg 999))])
26143 The clobber will cause the store into reg 999 to be dead, and
26144 flow will attempt to delete an epilogue insn. In this case, we
26145 need an unspec use/set of the register. */
26147 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
26148 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
26150 if (!epiloguep || call_used_regs [i])
26151 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
26152 gen_rtx_REG (V4SImode, i));
26153 else
26155 rtx reg = gen_rtx_REG (V4SImode, i);
26157 clobs[nclobs++]
26158 = gen_rtx_SET (reg,
26159 gen_rtx_UNSPEC (V4SImode,
26160 gen_rtvec (1, reg), 27));
26164 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
26166 for (i = 0; i < nclobs; ++i)
26167 XVECEXP (insn, 0, i) = clobs[i];
26169 return insn;
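/* For reference, a sketch under the assumption that ALTIVEC_REG_BIT
   follows its usual rs6000.h definition: AltiVec register v<n> owns bit
   0x80000000 >> n of the VRSAVE mask, so v0 is the most significant
   bit.  */
#if 0
static unsigned int
altivec_reg_bit (int vr)	/* vr = 0..31, i.e. v0..v31 */
{
  return 0x80000000u >> vr;	/* v0 -> MSB, v31 -> LSB */
}
#endif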
26172 static rtx
26173 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
26175 rtx addr, mem;
26177 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
26178 mem = gen_frame_mem (GET_MODE (reg), addr);
26179 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
26182 static rtx
26183 gen_frame_load (rtx reg, rtx frame_reg, int offset)
26185 return gen_frame_set (reg, frame_reg, offset, false);
26188 static rtx
26189 gen_frame_store (rtx reg, rtx frame_reg, int offset)
26191 return gen_frame_set (reg, frame_reg, offset, true);
26194 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
26195 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
26197 static rtx_insn *
26198 emit_frame_save (rtx frame_reg, machine_mode mode,
26199 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
26201 rtx reg;
26203 /* Some cases that need register indexed addressing. */
26204 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
26205 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
26207 reg = gen_rtx_REG (mode, regno);
26208 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
26209 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
26210 NULL_RTX, NULL_RTX);
26213 /* Emit an offset memory reference suitable for a frame store, while
26214 converting to a valid addressing mode. */
26216 static rtx
26217 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
26219 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
26222 #ifndef TARGET_FIX_AND_CONTINUE
26223 #define TARGET_FIX_AND_CONTINUE 0
26224 #endif
26226 /* It's really GPR 13 or 14 for GPRs, FPR 14 for FPRs, and VR 20 for VRs; we need the smallest. */
26227 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
26228 #define LAST_SAVRES_REGISTER 31
26229 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
26231 enum {
26232 SAVRES_LR = 0x1,
26233 SAVRES_SAVE = 0x2,
26234 SAVRES_REG = 0x0c,
26235 SAVRES_GPR = 0,
26236 SAVRES_FPR = 4,
26237 SAVRES_VR = 8
26240 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
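/* An illustrative sketch, not GCC code: SEL packs three independent
   facts into one small integer, which is why savres_routine_syms has
   twelve columns (the largest SEL is SAVRES_LR | SAVRES_SAVE | SAVRES_VR
   = 11).  */
#if 0
int sel = SAVRES_SAVE | SAVRES_FPR | SAVRES_LR;	/* save FPRs, LR variant */

int is_save   = (sel & SAVRES_SAVE) != 0;	/* save vs. restore */
int wants_lr  = (sel & SAVRES_LR) != 0;		/* LR-touching variant */
int reg_class = sel & SAVRES_REG;		/* SAVRES_{GPR,FPR,VR} */
#endif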
26242 /* Temporary holding space for an out-of-line register save/restore
26243 routine name. */
26244 static char savres_routine_name[30];
26246 /* Return the name for an out-of-line register save/restore routine.
26247 We are saving/restoring GPRs if GPR is true. */
26249 static char *
26250 rs6000_savres_routine_name (int regno, int sel)
26252 const char *prefix = "";
26253 const char *suffix = "";
26255 /* Different targets are supposed to define
26256 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
26257 routine name could be defined with:
26259 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
26261 This is a nice idea in theory, but in practice, things are
26262 complicated in several ways:
26264 - ELF targets have save/restore routines for GPRs.
26266 - PPC64 ELF targets have routines for save/restore of GPRs that
26267 differ in what they do with the link register, so having a set
26268 prefix doesn't work. (We only use one of the save routines at
26269 the moment, though.)
26271 - PPC32 elf targets have "exit" versions of the restore routines
26272 that restore the link register and can save some extra space.
26273 These require an extra suffix. (There are also "tail" versions
26274 of the restore routines and "GOT" versions of the save routines,
26275 but we don't generate those at present. Same problems apply,
26276 though.)
26278 We deal with all this by synthesizing our own prefix/suffix and
26279 using that for the simple sprintf call shown above. */
26280 if (DEFAULT_ABI == ABI_V4)
26282 if (TARGET_64BIT)
26283 goto aix_names;
26285 if ((sel & SAVRES_REG) == SAVRES_GPR)
26286 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
26287 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26288 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
26289 else if ((sel & SAVRES_REG) == SAVRES_VR)
26290 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26291 else
26292 abort ();
26294 if ((sel & SAVRES_LR))
26295 suffix = "_x";
26297 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26299 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26300 /* No out-of-line save/restore routines for GPRs on AIX. */
26301 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26302 #endif
26304 aix_names:
26305 if ((sel & SAVRES_REG) == SAVRES_GPR)
26306 prefix = ((sel & SAVRES_SAVE)
26307 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26308 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26309 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26311 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26312 if ((sel & SAVRES_LR))
26313 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26314 else
26315 #endif
26317 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26318 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26321 else if ((sel & SAVRES_REG) == SAVRES_VR)
26322 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26323 else
26324 abort ();
26327 if (DEFAULT_ABI == ABI_DARWIN)
26329 /* The Darwin approach is (slightly) different, in order to be
26330 compatible with code generated by the system toolchain. There is a
26331 single symbol for the start of the save sequence, and the code here
26332 embeds an offset into that code on the basis of the first register
26333 to be saved. */
26334 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26335 if ((sel & SAVRES_REG) == SAVRES_GPR)
26336 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26337 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26338 (regno - 13) * 4, prefix, regno);
26339 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26340 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26341 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26342 else if ((sel & SAVRES_REG) == SAVRES_VR)
26343 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26344 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26345 else
26346 abort ();
26348 else
26349 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26351 return savres_routine_name;
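/* A worked example with hypothetical inputs: an AIX/ELFv2-style save of
   GPRs starting at r29 with the LR variant selects the "_savegpr0_"
   prefix and an empty suffix, so the sprintf above produces
   "_savegpr0_29".  */
#if 0
char name[30];
sprintf (name, "%s%d%s", "_savegpr0_", 29, "");	/* -> "_savegpr0_29" */
#endif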
26354 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26355 We are saving/restoring GPRs if GPR is true. */
26357 static rtx
26358 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26360 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26361 ? info->first_gp_reg_save
26362 : (sel & SAVRES_REG) == SAVRES_FPR
26363 ? info->first_fp_reg_save - 32
26364 : (sel & SAVRES_REG) == SAVRES_VR
26365 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26366 : -1);
26367 rtx sym;
26368 int select = sel;
26370 /* Don't generate bogus routine names. */
26371 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26372 && regno <= LAST_SAVRES_REGISTER
26373 && select >= 0 && select <= 12);
26375 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26377 if (sym == NULL)
26379 char *name;
26381 name = rs6000_savres_routine_name (regno, sel);
26383 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26384 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26385 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26388 return sym;
26391 /* Emit a sequence of insns, including a stack tie if needed, for
26392 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26393 reset the stack pointer, but move the base of the frame into
26394 reg UPDT_REGNO for use by out-of-line register restore routines. */
26396 static rtx
26397 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26398 unsigned updt_regno)
26400 /* If there is nothing to do, don't do anything. */
26401 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26402 return NULL_RTX;
26404 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26406 /* This blockage is needed so that sched doesn't decide to move
26407 the sp change before the register restores. */
26408 if (DEFAULT_ABI == ABI_V4)
26409 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26410 GEN_INT (frame_off)));
26412 /* If we are restoring registers out-of-line, we will be using the
26413 "exit" variants of the restore routines, which will reset the
26414 stack for us. But we do need to point updt_reg into the
26415 right place for those routines. */
26416 if (frame_off != 0)
26417 return emit_insn (gen_add3_insn (updt_reg_rtx,
26418 frame_reg_rtx, GEN_INT (frame_off)));
26419 else
26420 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26422 return NULL_RTX;
26425 /* Return the register number used as a pointer by out-of-line
26426 save/restore functions. */
26428 static inline unsigned
26429 ptr_regno_for_savres (int sel)
26431 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26432 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26433 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26436 /* Construct a parallel rtx describing the effect of a call to an
26437 out-of-line register save/restore routine, and emit the insn
26438 or jump_insn as appropriate. */
26440 static rtx_insn *
26441 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26442 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26443 machine_mode reg_mode, int sel)
26445 int i;
26446 int offset, start_reg, end_reg, n_regs, use_reg;
26447 int reg_size = GET_MODE_SIZE (reg_mode);
26448 rtx sym;
26449 rtvec p;
26450 rtx par;
26451 rtx_insn *insn;
26453 offset = 0;
26454 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26455 ? info->first_gp_reg_save
26456 : (sel & SAVRES_REG) == SAVRES_FPR
26457 ? info->first_fp_reg_save
26458 : (sel & SAVRES_REG) == SAVRES_VR
26459 ? info->first_altivec_reg_save
26460 : -1);
26461 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26462 ? 32
26463 : (sel & SAVRES_REG) == SAVRES_FPR
26464 ? 64
26465 : (sel & SAVRES_REG) == SAVRES_VR
26466 ? LAST_ALTIVEC_REGNO + 1
26467 : -1);
26468 n_regs = end_reg - start_reg;
26469 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26470 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26471 + n_regs);
26473 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26474 RTVEC_ELT (p, offset++) = ret_rtx;
26476 RTVEC_ELT (p, offset++)
26477 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26479 sym = rs6000_savres_routine_sym (info, sel);
26480 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26482 use_reg = ptr_regno_for_savres (sel);
26483 if ((sel & SAVRES_REG) == SAVRES_VR)
26485 /* Vector regs are saved/restored using [reg+reg] addressing. */
26486 RTVEC_ELT (p, offset++)
26487 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26488 RTVEC_ELT (p, offset++)
26489 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26491 else
26492 RTVEC_ELT (p, offset++)
26493 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26495 for (i = 0; i < end_reg - start_reg; i++)
26496 RTVEC_ELT (p, i + offset)
26497 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26498 frame_reg_rtx, save_area_offset + reg_size * i,
26499 (sel & SAVRES_SAVE) != 0);
26501 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26502 RTVEC_ELT (p, i + offset)
26503 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26505 par = gen_rtx_PARALLEL (VOIDmode, p);
26507 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26509 insn = emit_jump_insn (par);
26510 JUMP_LABEL (insn) = ret_rtx;
26512 else
26513 insn = emit_insn (par);
26514 return insn;
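/* For illustration (hypothetical operands), the PARALLEL built above for
   a 32-bit SVR4 "exit" restore of FPRs starting at f28 looks roughly
   like:

   (parallel [(return)
	      (clobber (reg LR_REGNO))
	      (use (symbol_ref "_restfpr_28_x"))
	      (use (reg 11))
	      (set (reg:DF f28) (mem:DF (plus (reg 11) (const_int OFF))))
	      ...])  */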
26517 /* Emit prologue code to store CR fields that need to be saved into REG. This
26518 function should only be called when moving the non-volatile CRs to REG, it
26519 is not a general purpose routine to move the entire set of CRs to REG.
26520 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26521 volatile CRs. */
26523 static void
26524 rs6000_emit_prologue_move_from_cr (rtx reg)
26526 /* Only the ELFv2 ABI allows storing just a subset of the CR fields. */
26527 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26529 int i, cr_reg[8], count = 0;
26531 /* Collect CR fields that must be saved. */
26532 for (i = 0; i < 8; i++)
26533 if (save_reg_p (CR0_REGNO + i))
26534 cr_reg[count++] = i;
26536 /* If it's just a single one, use mfcrf. */
26537 if (count == 1)
26539 rtvec p = rtvec_alloc (1);
26540 rtvec r = rtvec_alloc (2);
26541 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26542 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26543 RTVEC_ELT (p, 0)
26544 = gen_rtx_SET (reg,
26545 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26547 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26548 return;
26551 /* ??? It might be better to handle count == 2 / 3 cases here
26552 as well, using logical operations to combine the values. */
26555 emit_insn (gen_prologue_movesi_from_cr (reg));
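/* A worked example for the single-field case above: if CR2 is the only
   field needing a save, cr_reg[0] == 2 and the mask operand is
   1 << (7 - 2) == 0x20, the FXM bit that selects CR2 for mfcrf.  */
#if 0
int field = 2;			/* CR2 is the only field to save */
int fxm = 1 << (7 - field);	/* 0x20 */
#endif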
26558 /* Return whether the split-stack arg pointer (r12) is used. */
26560 static bool
26561 split_stack_arg_pointer_used_p (void)
26563 /* If the pseudo holding the arg pointer is no longer a pseudo,
26564 then the arg pointer is used. */
26565 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26566 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26567 || (REGNO (cfun->machine->split_stack_arg_pointer)
26568 < FIRST_PSEUDO_REGISTER)))
26569 return true;
26571 /* Unfortunately we also need to do some code scanning, since
26572 r12 may have been substituted for the pseudo. */
26573 rtx_insn *insn;
26574 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26575 FOR_BB_INSNS (bb, insn)
26576 if (NONDEBUG_INSN_P (insn))
26578 /* A call destroys r12. */
26579 if (CALL_P (insn))
26580 return false;
26582 df_ref use;
26583 FOR_EACH_INSN_USE (use, insn)
26585 rtx x = DF_REF_REG (use);
26586 if (REG_P (x) && REGNO (x) == 12)
26587 return true;
26589 df_ref def;
26590 FOR_EACH_INSN_DEF (def, insn)
26592 rtx x = DF_REF_REG (def);
26593 if (REG_P (x) && REGNO (x) == 12)
26594 return false;
26597 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26600 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26602 static bool
26603 rs6000_global_entry_point_needed_p (void)
26605 /* Only needed for the ELFv2 ABI. */
26606 if (DEFAULT_ABI != ABI_ELFv2)
26607 return false;
26609 /* With -msingle-pic-base, we assume the whole program shares the same
26610 TOC, so no global entry point prologues are needed anywhere. */
26611 if (TARGET_SINGLE_PIC_BASE)
26612 return false;
26614 /* Ensure we have a global entry point for thunks. ??? We could
26615 avoid that if the target routine doesn't need a global entry point,
26616 but we do not know whether this is the case at this point. */
26617 if (cfun->is_thunk)
26618 return true;
26620 /* For regular functions, rs6000_emit_prologue sets this flag if the
26621 routine ever uses the TOC pointer. */
26622 return cfun->machine->r2_setup_needed;
26625 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26626 static sbitmap
26627 rs6000_get_separate_components (void)
26629 rs6000_stack_t *info = rs6000_stack_info ();
26631 if (WORLD_SAVE_P (info))
26632 return NULL;
26634 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26635 && !(info->savres_strategy & REST_MULTIPLE));
26637 /* Component 0 is the save/restore of LR (done via GPR0).
26638 Component 2 is the save of the TOC (GPR2).
26639 Components 13..31 are the save/restore of GPR13..GPR31.
26640 Components 46..63 are the save/restore of FPR14..FPR31. */
26642 cfun->machine->n_components = 64;
26644 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26645 bitmap_clear (components);
26647 int reg_size = TARGET_32BIT ? 4 : 8;
26648 int fp_reg_size = 8;
26650 /* The GPRs we need saved to the frame. */
26651 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26652 && (info->savres_strategy & REST_INLINE_GPRS))
26654 int offset = info->gp_save_offset;
26655 if (info->push_p)
26656 offset += info->total_size;
26658 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26660 if (IN_RANGE (offset, -0x8000, 0x7fff)
26661 && save_reg_p (regno))
26662 bitmap_set_bit (components, regno);
26664 offset += reg_size;
26668 /* Don't mess with the hard frame pointer. */
26669 if (frame_pointer_needed)
26670 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26672 /* Don't mess with the fixed TOC register. */
26673 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26674 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26675 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26676 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26678 /* The FPRs we need saved to the frame. */
26679 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26680 && (info->savres_strategy & REST_INLINE_FPRS))
26682 int offset = info->fp_save_offset;
26683 if (info->push_p)
26684 offset += info->total_size;
26686 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26688 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26689 bitmap_set_bit (components, regno);
26691 offset += fp_reg_size;
26695 /* Optimize LR save and restore if we can. This is component 0. Any
26696 out-of-line register save/restore routines need LR. */
26697 if (info->lr_save_p
26698 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26699 && (info->savres_strategy & SAVE_INLINE_GPRS)
26700 && (info->savres_strategy & REST_INLINE_GPRS)
26701 && (info->savres_strategy & SAVE_INLINE_FPRS)
26702 && (info->savres_strategy & REST_INLINE_FPRS)
26703 && (info->savres_strategy & SAVE_INLINE_VRS)
26704 && (info->savres_strategy & REST_INLINE_VRS))
26706 int offset = info->lr_save_offset;
26707 if (info->push_p)
26708 offset += info->total_size;
26709 if (IN_RANGE (offset, -0x8000, 0x7fff))
26710 bitmap_set_bit (components, 0);
26713 /* Optimize saving the TOC. This is component 2. */
26714 if (cfun->machine->save_toc_in_prologue)
26715 bitmap_set_bit (components, 2);
26717 return components;
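/* A sketch, not GCC code, of the component numbering used above: saved
   GPRs and FPRs simply reuse their hard register numbers, leaving 0 and
   2 free for the LR and TOC saves.  */
#if 0
/* component 0       -> LR save/restore (staged through GPR0)
   component 2       -> TOC save (GPR2)
   components 13..31 -> GPR13..GPR31 (the regno itself)
   components 46..63 -> FPR14..FPR31 (FPR regnos are 32..63)  */
static int
component_for_saved_reg (int regno)
{
  return regno;
}
#endif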
26720 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26721 static sbitmap
26722 rs6000_components_for_bb (basic_block bb)
26724 rs6000_stack_t *info = rs6000_stack_info ();
26726 bitmap in = DF_LIVE_IN (bb);
26727 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26728 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26730 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26731 bitmap_clear (components);
26733 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26735 /* GPRs. */
26736 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26737 if (bitmap_bit_p (in, regno)
26738 || bitmap_bit_p (gen, regno)
26739 || bitmap_bit_p (kill, regno))
26740 bitmap_set_bit (components, regno);
26742 /* FPRs. */
26743 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26744 if (bitmap_bit_p (in, regno)
26745 || bitmap_bit_p (gen, regno)
26746 || bitmap_bit_p (kill, regno))
26747 bitmap_set_bit (components, regno);
26749 /* The link register. */
26750 if (bitmap_bit_p (in, LR_REGNO)
26751 || bitmap_bit_p (gen, LR_REGNO)
26752 || bitmap_bit_p (kill, LR_REGNO))
26753 bitmap_set_bit (components, 0);
26755 /* The TOC save. */
26756 if (bitmap_bit_p (in, TOC_REGNUM)
26757 || bitmap_bit_p (gen, TOC_REGNUM)
26758 || bitmap_bit_p (kill, TOC_REGNUM))
26759 bitmap_set_bit (components, 2);
26761 return components;
26764 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26765 static void
26766 rs6000_disqualify_components (sbitmap components, edge e,
26767 sbitmap edge_components, bool /*is_prologue*/)
26769 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26770 live where we want to place that code. */
26771 if (bitmap_bit_p (edge_components, 0)
26772 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26774 if (dump_file)
26775 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26776 "on entry to bb %d\n", e->dest->index);
26777 bitmap_clear_bit (components, 0);
26781 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26782 static void
26783 rs6000_emit_prologue_components (sbitmap components)
26785 rs6000_stack_t *info = rs6000_stack_info ();
26786 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26787 ? HARD_FRAME_POINTER_REGNUM
26788 : STACK_POINTER_REGNUM);
26790 machine_mode reg_mode = Pmode;
26791 int reg_size = TARGET_32BIT ? 4 : 8;
26792 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26793 ? DFmode : SFmode;
26794 int fp_reg_size = 8;
26796 /* Prologue for LR. */
26797 if (bitmap_bit_p (components, 0))
26799 rtx reg = gen_rtx_REG (reg_mode, 0);
26800 rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
26801 RTX_FRAME_RELATED_P (insn) = 1;
26802 add_reg_note (insn, REG_CFA_REGISTER, NULL);
26804 int offset = info->lr_save_offset;
26805 if (info->push_p)
26806 offset += info->total_size;
26808 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26809 RTX_FRAME_RELATED_P (insn) = 1;
26810 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26811 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26812 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26815 /* Prologue for TOC. */
26816 if (bitmap_bit_p (components, 2))
26818 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26819 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26820 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26823 /* Prologue for the GPRs. */
26824 int offset = info->gp_save_offset;
26825 if (info->push_p)
26826 offset += info->total_size;
26828 for (int i = info->first_gp_reg_save; i < 32; i++)
26830 if (bitmap_bit_p (components, i))
26832 rtx reg = gen_rtx_REG (reg_mode, i);
26833 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26834 RTX_FRAME_RELATED_P (insn) = 1;
26835 rtx set = copy_rtx (single_set (insn));
26836 add_reg_note (insn, REG_CFA_OFFSET, set);
26839 offset += reg_size;
26842 /* Prologue for the FPRs. */
26843 offset = info->fp_save_offset;
26844 if (info->push_p)
26845 offset += info->total_size;
26847 for (int i = info->first_fp_reg_save; i < 64; i++)
26849 if (bitmap_bit_p (components, i))
26851 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26852 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26853 RTX_FRAME_RELATED_P (insn) = 1;
26854 rtx set = copy_rtx (single_set (insn));
26855 add_reg_note (insn, REG_CFA_OFFSET, set);
26858 offset += fp_reg_size;
26862 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26863 static void
26864 rs6000_emit_epilogue_components (sbitmap components)
26866 rs6000_stack_t *info = rs6000_stack_info ();
26867 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26868 ? HARD_FRAME_POINTER_REGNUM
26869 : STACK_POINTER_REGNUM);
26871 machine_mode reg_mode = Pmode;
26872 int reg_size = TARGET_32BIT ? 4 : 8;
26874 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26875 ? DFmode : SFmode;
26876 int fp_reg_size = 8;
26878 /* Epilogue for the FPRs. */
26879 int offset = info->fp_save_offset;
26880 if (info->push_p)
26881 offset += info->total_size;
26883 for (int i = info->first_fp_reg_save; i < 64; i++)
26885 if (bitmap_bit_p (components, i))
26887 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26888 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26889 RTX_FRAME_RELATED_P (insn) = 1;
26890 add_reg_note (insn, REG_CFA_RESTORE, reg);
26893 offset += fp_reg_size;
26896 /* Epilogue for the GPRs. */
26897 offset = info->gp_save_offset;
26898 if (info->push_p)
26899 offset += info->total_size;
26901 for (int i = info->first_gp_reg_save; i < 32; i++)
26903 if (bitmap_bit_p (components, i))
26905 rtx reg = gen_rtx_REG (reg_mode, i);
26906 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26907 RTX_FRAME_RELATED_P (insn) = 1;
26908 add_reg_note (insn, REG_CFA_RESTORE, reg);
26911 offset += reg_size;
26914 /* Epilogue for LR. */
26915 if (bitmap_bit_p (components, 0))
26917 int offset = info->lr_save_offset;
26918 if (info->push_p)
26919 offset += info->total_size;
26921 rtx reg = gen_rtx_REG (reg_mode, 0);
26922 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26924 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26925 insn = emit_move_insn (lr, reg);
26926 RTX_FRAME_RELATED_P (insn) = 1;
26927 add_reg_note (insn, REG_CFA_RESTORE, lr);
26931 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26932 static void
26933 rs6000_set_handled_components (sbitmap components)
26935 rs6000_stack_t *info = rs6000_stack_info ();
26937 for (int i = info->first_gp_reg_save; i < 32; i++)
26938 if (bitmap_bit_p (components, i))
26939 cfun->machine->gpr_is_wrapped_separately[i] = true;
26941 for (int i = info->first_fp_reg_save; i < 64; i++)
26942 if (bitmap_bit_p (components, i))
26943 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26945 if (bitmap_bit_p (components, 0))
26946 cfun->machine->lr_is_wrapped_separately = true;
26948 if (bitmap_bit_p (components, 2))
26949 cfun->machine->toc_is_wrapped_separately = true;
26952 /* VRSAVE is a bit vector representing which AltiVec registers
26953 are used. The OS uses this to determine which vector
26954 registers to save on a context switch. We need to save
26955 VRSAVE on the stack frame, add whatever AltiVec registers we
26956 used in this function, and do the corresponding magic in the
26957 epilogue. */
26958 static void
26959 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26960 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26962 /* Get VRSAVE into a GPR. */
26963 rtx reg = gen_rtx_REG (SImode, save_regno);
26964 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26965 if (TARGET_MACHO)
26966 emit_insn (gen_get_vrsave_internal (reg));
26967 else
26968 emit_insn (gen_rtx_SET (reg, vrsave));
26970 /* Save VRSAVE. */
26971 int offset = info->vrsave_save_offset + frame_off;
26972 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26974 /* Include the registers in the mask. */
26975 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26977 emit_insn (generate_set_vrsave (reg, info, 0));
26980 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26981 called, it left the arg pointer to the old stack in r29. Otherwise, the
26982 arg pointer is the top of the current frame. */
26983 static void
26984 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26985 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26987 cfun->machine->split_stack_argp_used = true;
26989 if (sp_adjust)
26991 rtx r12 = gen_rtx_REG (Pmode, 12);
26992 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26993 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26994 emit_insn_before (set_r12, sp_adjust);
26996 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26998 rtx r12 = gen_rtx_REG (Pmode, 12);
26999 if (frame_off == 0)
27000 emit_move_insn (r12, frame_reg_rtx);
27001 else
27002 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
27005 if (info->push_p)
27007 rtx r12 = gen_rtx_REG (Pmode, 12);
27008 rtx r29 = gen_rtx_REG (Pmode, 29);
27009 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
27010 rtx not_more = gen_label_rtx ();
27011 rtx jump;
27013 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27014 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
27015 gen_rtx_LABEL_REF (VOIDmode, not_more),
27016 pc_rtx);
27017 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27018 JUMP_LABEL (jump) = not_more;
27019 LABEL_NUSES (not_more) += 1;
27020 emit_move_insn (r12, r29);
27021 emit_label (not_more);
27025 /* Emit function prologue as insns. */
27027 void
27028 rs6000_emit_prologue (void)
27030 rs6000_stack_t *info = rs6000_stack_info ();
27031 machine_mode reg_mode = Pmode;
27032 int reg_size = TARGET_32BIT ? 4 : 8;
27033 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
27034 ? DFmode : SFmode;
27035 int fp_reg_size = 8;
27036 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
27037 rtx frame_reg_rtx = sp_reg_rtx;
27038 unsigned int cr_save_regno;
27039 rtx cr_save_rtx = NULL_RTX;
27040 rtx_insn *insn;
27041 int strategy;
27042 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
27043 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
27044 && call_used_regs[STATIC_CHAIN_REGNUM]);
27045 int using_split_stack = (flag_split_stack
27046 && (lookup_attribute ("no_split_stack",
27047 DECL_ATTRIBUTES (cfun->decl))
27048 == NULL));
27050 /* Offset to top of frame for frame_reg and sp respectively. */
27051 HOST_WIDE_INT frame_off = 0;
27052 HOST_WIDE_INT sp_off = 0;
27053 /* sp_adjust is the stack adjusting instruction, tracked so that the
27054 insn setting up the split-stack arg pointer can be emitted just
27055 prior to it, when r12 is not used here for other purposes. */
27056 rtx_insn *sp_adjust = 0;
27058 #if CHECKING_P
27059 /* Track and check usage of r0, r11, r12. */
27060 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
27061 #define START_USE(R) do \
27063 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27064 reg_inuse |= 1 << (R); \
27065 } while (0)
27066 #define END_USE(R) do \
27068 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
27069 reg_inuse &= ~(1 << (R)); \
27070 } while (0)
27071 #define NOT_INUSE(R) do \
27073 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27074 } while (0)
27075 #else
27076 #define START_USE(R) do {} while (0)
27077 #define END_USE(R) do {} while (0)
27078 #define NOT_INUSE(R) do {} while (0)
27079 #endif
27081 if (DEFAULT_ABI == ABI_ELFv2
27082 && !TARGET_SINGLE_PIC_BASE)
27084 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
27086 /* With -mminimal-toc we may generate an extra use of r2 below. */
27087 if (TARGET_TOC && TARGET_MINIMAL_TOC
27088 && !constant_pool_empty_p ())
27089 cfun->machine->r2_setup_needed = true;
27093 if (flag_stack_usage_info)
27094 current_function_static_stack_size = info->total_size;
27096 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
27098 HOST_WIDE_INT size = info->total_size;
27100 if (crtl->is_leaf && !cfun->calls_alloca)
27102 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
27103 rs6000_emit_probe_stack_range (get_stack_check_protect (),
27104 size - get_stack_check_protect ());
27106 else if (size > 0)
27107 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
27110 if (TARGET_FIX_AND_CONTINUE)
27112 /* gdb on darwin arranges to forward a function from the old
27113 address by modifying the first 5 instructions of the function
27114 to branch to the overriding function. This is necessary to
27115 permit function pointers that point to the old function to
27116 actually forward to the new function. */
27117 emit_insn (gen_nop ());
27118 emit_insn (gen_nop ());
27119 emit_insn (gen_nop ());
27120 emit_insn (gen_nop ());
27121 emit_insn (gen_nop ());
27124 /* Handle world saves specially here. */
27125 if (WORLD_SAVE_P (info))
27127 int i, j, sz;
27128 rtx treg;
27129 rtvec p;
27130 rtx reg0;
27132 /* save_world expects lr in r0. */
27133 reg0 = gen_rtx_REG (Pmode, 0);
27134 if (info->lr_save_p)
27136 insn = emit_move_insn (reg0,
27137 gen_rtx_REG (Pmode, LR_REGNO));
27138 RTX_FRAME_RELATED_P (insn) = 1;
27141 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
27142 assumptions about the offsets of various bits of the stack
27143 frame. */
27144 gcc_assert (info->gp_save_offset == -220
27145 && info->fp_save_offset == -144
27146 && info->lr_save_offset == 8
27147 && info->cr_save_offset == 4
27148 && info->push_p
27149 && info->lr_save_p
27150 && (!crtl->calls_eh_return
27151 || info->ehrd_offset == -432)
27152 && info->vrsave_save_offset == -224
27153 && info->altivec_save_offset == -416);
27155 treg = gen_rtx_REG (SImode, 11);
27156 emit_move_insn (treg, GEN_INT (-info->total_size));
27158 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
27159 in R11. It also clobbers R12, so beware! */
27161 /* Preserve CR2 for save_world prologues. */
27162 sz = 5;
27163 sz += 32 - info->first_gp_reg_save;
27164 sz += 64 - info->first_fp_reg_save;
27165 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
27166 p = rtvec_alloc (sz);
27167 j = 0;
27168 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
27169 gen_rtx_REG (SImode,
27170 LR_REGNO));
27171 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
27172 gen_rtx_SYMBOL_REF (Pmode,
27173 "*save_world"));
27174 /* We do floats first so that the instruction pattern matches
27175 properly. */
27176 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
27177 RTVEC_ELT (p, j++)
27178 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
27179 ? DFmode : SFmode,
27180 info->first_fp_reg_save + i),
27181 frame_reg_rtx,
27182 info->fp_save_offset + frame_off + 8 * i);
27183 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27184 RTVEC_ELT (p, j++)
27185 = gen_frame_store (gen_rtx_REG (V4SImode,
27186 info->first_altivec_reg_save + i),
27187 frame_reg_rtx,
27188 info->altivec_save_offset + frame_off + 16 * i);
27189 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27190 RTVEC_ELT (p, j++)
27191 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27192 frame_reg_rtx,
27193 info->gp_save_offset + frame_off + reg_size * i);
27195 /* CR register traditionally saved as CR2. */
27196 RTVEC_ELT (p, j++)
27197 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
27198 frame_reg_rtx, info->cr_save_offset + frame_off);
27199 /* Explain the use of R0. */
27200 if (info->lr_save_p)
27201 RTVEC_ELT (p, j++)
27202 = gen_frame_store (reg0,
27203 frame_reg_rtx, info->lr_save_offset + frame_off);
27204 /* Explain what happens to the stack pointer. */
27206 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
27207 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
27210 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27211 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27212 treg, GEN_INT (-info->total_size));
27213 sp_off = frame_off = info->total_size;
27216 strategy = info->savres_strategy;
27218 /* For V.4, update stack before we do any saving and set back pointer. */
27219 if (! WORLD_SAVE_P (info)
27220 && info->push_p
27221 && (DEFAULT_ABI == ABI_V4
27222 || crtl->calls_eh_return))
27224 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
27225 || !(strategy & SAVE_INLINE_GPRS)
27226 || !(strategy & SAVE_INLINE_VRS));
27227 int ptr_regno = -1;
27228 rtx ptr_reg = NULL_RTX;
27229 int ptr_off = 0;
27231 if (info->total_size < 32767)
27232 frame_off = info->total_size;
27233 else if (need_r11)
27234 ptr_regno = 11;
27235 else if (info->cr_save_p
27236 || info->lr_save_p
27237 || info->first_fp_reg_save < 64
27238 || info->first_gp_reg_save < 32
27239 || info->altivec_size != 0
27240 || info->vrsave_size != 0
27241 || crtl->calls_eh_return)
27242 ptr_regno = 12;
27243 else
27245 /* The prologue won't be saving any regs so there is no need
27246 to set up a frame register to access any frame save area.
27247 We also won't be using frame_off anywhere below, but set
27248 the correct value anyway to protect against future
27249 changes to this function. */
27250 frame_off = info->total_size;
27252 if (ptr_regno != -1)
27254 /* Set up the frame offset to that needed by the first
27255 out-of-line save function. */
27256 START_USE (ptr_regno);
27257 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27258 frame_reg_rtx = ptr_reg;
27259 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
27260 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
27261 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
27262 ptr_off = info->gp_save_offset + info->gp_size;
27263 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
27264 ptr_off = info->altivec_save_offset + info->altivec_size;
27265 frame_off = -ptr_off;
27267 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27268 ptr_reg, ptr_off);
27269 if (REGNO (frame_reg_rtx) == 12)
27270 sp_adjust = 0;
27271 sp_off = info->total_size;
27272 if (frame_reg_rtx != sp_reg_rtx)
27273 rs6000_emit_stack_tie (frame_reg_rtx, false);
27276 /* If we use the link register, get it into r0. */
27277 if (!WORLD_SAVE_P (info) && info->lr_save_p
27278 && !cfun->machine->lr_is_wrapped_separately)
27280 rtx addr, reg, mem;
27282 reg = gen_rtx_REG (Pmode, 0);
27283 START_USE (0);
27284 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
27285 RTX_FRAME_RELATED_P (insn) = 1;
27287 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
27288 | SAVE_NOINLINE_FPRS_SAVES_LR)))
27290 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27291 GEN_INT (info->lr_save_offset + frame_off));
27292 mem = gen_rtx_MEM (Pmode, addr);
27293 /* This should not use rs6000_sr_alias_set, because of
27294 __builtin_return_address. */
27296 insn = emit_move_insn (mem, reg);
27297 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27298 NULL_RTX, NULL_RTX);
27299 END_USE (0);
27303 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27304 r12 will be needed by the out-of-line gpr save. */
27305 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27306 && !(strategy & (SAVE_INLINE_GPRS
27307 | SAVE_NOINLINE_GPRS_SAVES_LR))
27308 ? 11 : 12);
27309 if (!WORLD_SAVE_P (info)
27310 && info->cr_save_p
27311 && REGNO (frame_reg_rtx) != cr_save_regno
27312 && !(using_static_chain_p && cr_save_regno == 11)
27313 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
27315 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
27316 START_USE (cr_save_regno);
27317 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27320 /* Do any required saving of fpr's. If only one or two to save, do
27321 it ourselves. Otherwise, call function. */
27322 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
27324 int offset = info->fp_save_offset + frame_off;
27325 for (int i = info->first_fp_reg_save; i < 64; i++)
27327 if (save_reg_p (i)
27328 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
27329 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
27330 sp_off - frame_off);
27332 offset += fp_reg_size;
27335 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
27337 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27338 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27339 unsigned ptr_regno = ptr_regno_for_savres (sel);
27340 rtx ptr_reg = frame_reg_rtx;
27342 if (REGNO (frame_reg_rtx) == ptr_regno)
27343 gcc_checking_assert (frame_off == 0);
27344 else
27346 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27347 NOT_INUSE (ptr_regno);
27348 emit_insn (gen_add3_insn (ptr_reg,
27349 frame_reg_rtx, GEN_INT (frame_off)));
27351 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27352 info->fp_save_offset,
27353 info->lr_save_offset,
27354 DFmode, sel);
27355 rs6000_frame_related (insn, ptr_reg, sp_off,
27356 NULL_RTX, NULL_RTX);
27357 if (lr)
27358 END_USE (0);
27361 /* Save GPRs. This is done as a PARALLEL if we are using
27362 the store-multiple instructions. */
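/* E.g. with first_gp_reg_save == 13 on a 32-bit target, a single
   "stmw 13,-76(1)" stores r13..r31 in one instruction (the offset
   shown is illustrative; the real one comes from gp_save_offset).  */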
27363 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27365 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27366 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27367 unsigned ptr_regno = ptr_regno_for_savres (sel);
27368 rtx ptr_reg = frame_reg_rtx;
27369 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27370 int end_save = info->gp_save_offset + info->gp_size;
27371 int ptr_off;
27373 if (ptr_regno == 12)
27374 sp_adjust = 0;
27375 if (!ptr_set_up)
27376 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27378 /* Need to adjust r11 (r12) if we saved any FPRs. */
27379 if (end_save + frame_off != 0)
27381 rtx offset = GEN_INT (end_save + frame_off);
27383 if (ptr_set_up)
27384 frame_off = -end_save;
27385 else
27386 NOT_INUSE (ptr_regno);
27387 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27389 else if (!ptr_set_up)
27391 NOT_INUSE (ptr_regno);
27392 emit_move_insn (ptr_reg, frame_reg_rtx);
27394 ptr_off = -end_save;
27395 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27396 info->gp_save_offset + ptr_off,
27397 info->lr_save_offset + ptr_off,
27398 reg_mode, sel);
27399 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27400 NULL_RTX, NULL_RTX);
27401 if (lr)
27402 END_USE (0);
27404 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27406 rtvec p;
27407 int i;
27408 p = rtvec_alloc (32 - info->first_gp_reg_save);
27409 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27410 RTVEC_ELT (p, i)
27411 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27412 frame_reg_rtx,
27413 info->gp_save_offset + frame_off + reg_size * i);
27414 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27415 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27416 NULL_RTX, NULL_RTX);
27418 else if (!WORLD_SAVE_P (info))
27420 int offset = info->gp_save_offset + frame_off;
27421 for (int i = info->first_gp_reg_save; i < 32; i++)
27423 if (save_reg_p (i)
27424 && !cfun->machine->gpr_is_wrapped_separately[i])
27425 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27426 sp_off - frame_off);
27428 offset += reg_size;
27432 if (crtl->calls_eh_return)
27434 unsigned int i;
27435 rtvec p;
27437 for (i = 0; ; ++i)
27439 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27440 if (regno == INVALID_REGNUM)
27441 break;
27444 p = rtvec_alloc (i);
27446 for (i = 0; ; ++i)
27448 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27449 if (regno == INVALID_REGNUM)
27450 break;
27452 rtx set
27453 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27454 sp_reg_rtx,
27455 info->ehrd_offset + sp_off + reg_size * (int) i);
27456 RTVEC_ELT (p, i) = set;
27457 RTX_FRAME_RELATED_P (set) = 1;
27460 insn = emit_insn (gen_blockage ());
27461 RTX_FRAME_RELATED_P (insn) = 1;
27462 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27465 /* In the AIX ABI we need to make sure r2 is really saved. */
27466 if (TARGET_AIX && crtl->calls_eh_return)
27468 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27469 rtx join_insn, note;
27470 rtx_insn *save_insn;
27471 long toc_restore_insn;
27473 tmp_reg = gen_rtx_REG (Pmode, 11);
27474 tmp_reg_si = gen_rtx_REG (SImode, 11);
27475 if (using_static_chain_p)
27477 START_USE (0);
27478 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27480 else
27481 START_USE (11);
27482 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27483 /* Peek at instruction to which this function returns. If it's
27484 restoring r2, then we know we've already saved r2. We can't
27485 unconditionally save r2 because the value we have will already
27486 be updated if we arrived at this function via a plt call or
27487 toc adjusting stub. */
27488 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27489 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27490 + RS6000_TOC_SAVE_SLOT);
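/* The constant built above is the image of the expected TOC-restore
   instruction: 0x8041xxxx is "lwz 2,N(1)" and 0xE841xxxx is
   "ld 2,N(1)", where N is RS6000_TOC_SAVE_SLOT.  The xor/compare
   below matches the return address against it 16 bits at a time.  */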
27491 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27492 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27493 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27494 validate_condition_mode (EQ, CCUNSmode);
27495 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27496 emit_insn (gen_rtx_SET (compare_result,
27497 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27498 toc_save_done = gen_label_rtx ();
27499 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27500 gen_rtx_EQ (VOIDmode, compare_result,
27501 const0_rtx),
27502 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27503 pc_rtx);
27504 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27505 JUMP_LABEL (jump) = toc_save_done;
27506 LABEL_NUSES (toc_save_done) += 1;
27508 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27509 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27510 sp_off - frame_off);
27512 emit_label (toc_save_done);
27514 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
27515 have a CFG that has different saves along different paths.
27516 Move the note to a dummy blockage insn, which describes that
27517 R2 is unconditionally saved after the label. */
27518 /* ??? An alternate representation might be a special insn pattern
27519 containing both the branch and the store. That might give the
27520 code that minimizes the number of DW_CFA_advance opcodes more
27521 freedom in placing the annotations. */
27522 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27523 if (note)
27524 remove_note (save_insn, note);
27525 else
27526 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27527 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27528 RTX_FRAME_RELATED_P (save_insn) = 0;
27530 join_insn = emit_insn (gen_blockage ());
27531 REG_NOTES (join_insn) = note;
27532 RTX_FRAME_RELATED_P (join_insn) = 1;
27534 if (using_static_chain_p)
27536 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27537 END_USE (0);
27539 else
27540 END_USE (11);
27543 /* Save CR if we use any that must be preserved. */
27544 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27546 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27547 GEN_INT (info->cr_save_offset + frame_off));
27548 rtx mem = gen_frame_mem (SImode, addr);
27550 /* If we didn't copy cr before, do so now using r0. */
27551 if (cr_save_rtx == NULL_RTX)
27553 START_USE (0);
27554 cr_save_rtx = gen_rtx_REG (SImode, 0);
27555 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27558 /* Saving CR requires a two-instruction sequence: one instruction
27559 to move the CR to a general-purpose register, and a second
27560 instruction that stores the GPR to memory.
27562 We do not emit any DWARF CFI records for the first of these,
27563 because we cannot properly represent the fact that CR is saved in
27564 a register. One reason is that we cannot express that multiple
27565 CR fields are saved; another reason is that on 64-bit, the size
27566 of the CR register in DWARF (4 bytes) differs from the size of
27567 a general-purpose register.
27569 This means if any intervening instruction were to clobber one of
27570 the call-saved CR fields, we'd have incorrect CFI. To prevent
27571 this from happening, we mark the store to memory as a use of
27572 those CR fields, which prevents any such instruction from being
27573 scheduled in between the two instructions. */
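/* Illustratively, the resulting pair is something like

	mfcr 12
	stw 12,8(1)

   with the scratch register and offset depending on the ABI and
   frame layout.  */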
27574 rtx crsave_v[9];
27575 int n_crsave = 0;
27576 int i;
27578 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27579 for (i = 0; i < 8; i++)
27580 if (save_reg_p (CR0_REGNO + i))
27581 crsave_v[n_crsave++]
27582 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27584 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27585 gen_rtvec_v (n_crsave, crsave_v)));
27586 END_USE (REGNO (cr_save_rtx));
27588 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27589 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27590 so we need to construct a frame expression manually. */
27591 RTX_FRAME_RELATED_P (insn) = 1;
27593 /* Update address to be stack-pointer relative, like
27594 rs6000_frame_related would do. */
27595 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27596 GEN_INT (info->cr_save_offset + sp_off));
27597 mem = gen_frame_mem (SImode, addr);
27599 if (DEFAULT_ABI == ABI_ELFv2)
27601 /* In the ELFv2 ABI we generate separate CFI records for each
27602 CR field that was actually saved. They all point to the
27603 same 32-bit stack slot. */
27604 rtx crframe[8];
27605 int n_crframe = 0;
27607 for (i = 0; i < 8; i++)
27608 if (save_reg_p (CR0_REGNO + i))
27610 crframe[n_crframe]
27611 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27613 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27614 n_crframe++;
27617 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27618 gen_rtx_PARALLEL (VOIDmode,
27619 gen_rtvec_v (n_crframe, crframe)));
27621 else
27623 /* In other ABIs, by convention, we use a single CR regnum to
27624 represent the fact that all call-saved CR fields are saved.
27625 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27626 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27627 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27631 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27632 *separate* slots if the routine calls __builtin_eh_return, so
27633 that they can be independently restored by the unwinder. */
27634 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27636 int i, cr_off = info->ehcr_offset;
27637 rtx crsave;
27639 /* ??? We might get better performance by using multiple mfocrf
27640 instructions. */
27641 crsave = gen_rtx_REG (SImode, 0);
27642 emit_insn (gen_prologue_movesi_from_cr (crsave));
27644 for (i = 0; i < 8; i++)
27645 if (!call_used_regs[CR0_REGNO + i])
27647 rtvec p = rtvec_alloc (2);
27648 RTVEC_ELT (p, 0)
27649 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27650 RTVEC_ELT (p, 1)
27651 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27653 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27655 RTX_FRAME_RELATED_P (insn) = 1;
27656 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27657 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27658 sp_reg_rtx, cr_off + sp_off));
27660 cr_off += reg_size;
27664 /* If we are emitting stack probes but allocating no stack, then
27665 just note that in the dump file. */
27666 if (flag_stack_clash_protection
27667 && dump_file
27668 && !info->push_p)
27669 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27671 /* Update stack and set back pointer unless this is V.4,
27672 for which it was done previously. */
27673 if (!WORLD_SAVE_P (info) && info->push_p
27674 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27676 rtx ptr_reg = NULL;
27677 int ptr_off = 0;
27679 /* If saving altivec regs, we need to be able to address all save
27680 locations using a 16-bit offset. */
27681 if ((strategy & SAVE_INLINE_VRS) == 0
27682 || (info->altivec_size != 0
27683 && (info->altivec_save_offset + info->altivec_size - 16
27684 + info->total_size - frame_off) > 32767)
27685 || (info->vrsave_size != 0
27686 && (info->vrsave_save_offset
27687 + info->total_size - frame_off) > 32767))
27689 int sel = SAVRES_SAVE | SAVRES_VR;
27690 unsigned ptr_regno = ptr_regno_for_savres (sel);
27692 if (using_static_chain_p
27693 && ptr_regno == STATIC_CHAIN_REGNUM)
27694 ptr_regno = 12;
27695 if (REGNO (frame_reg_rtx) != ptr_regno)
27696 START_USE (ptr_regno);
27697 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27698 frame_reg_rtx = ptr_reg;
27699 ptr_off = info->altivec_save_offset + info->altivec_size;
27700 frame_off = -ptr_off;
27702 else if (REGNO (frame_reg_rtx) == 1)
27703 frame_off = info->total_size;
27704 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27705 ptr_reg, ptr_off);
27706 if (REGNO (frame_reg_rtx) == 12)
27707 sp_adjust = 0;
27708 sp_off = info->total_size;
27709 if (frame_reg_rtx != sp_reg_rtx)
27710 rs6000_emit_stack_tie (frame_reg_rtx, false);
27713 /* Set frame pointer, if needed. */
27714 if (frame_pointer_needed)
27716 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27717 sp_reg_rtx);
27718 RTX_FRAME_RELATED_P (insn) = 1;
27721 /* Save AltiVec registers if needed. Save here because the red zone does
27722 not always include AltiVec registers. */
27723 if (!WORLD_SAVE_P (info)
27724 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27726 int end_save = info->altivec_save_offset + info->altivec_size;
27727 int ptr_off;
27728 /* Oddly, the vector save/restore functions point r0 at the end
27729 of the save area, then use r11 or r12 to load offsets for
27730 [reg+reg] addressing. */
27731 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27732 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27733 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27735 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27736 NOT_INUSE (0);
27737 if (scratch_regno == 12)
27738 sp_adjust = 0;
27739 if (end_save + frame_off != 0)
27741 rtx offset = GEN_INT (end_save + frame_off);
27743 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27745 else
27746 emit_move_insn (ptr_reg, frame_reg_rtx);
27748 ptr_off = -end_save;
27749 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27750 info->altivec_save_offset + ptr_off,
27751 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27752 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27753 NULL_RTX, NULL_RTX);
27754 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27756 /* The oddity mentioned above clobbered our frame reg. */
27757 emit_move_insn (frame_reg_rtx, ptr_reg);
27758 frame_off = ptr_off;
27761 else if (!WORLD_SAVE_P (info)
27762 && info->altivec_size != 0)
27764 int i;
27766 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27767 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27769 rtx areg, savereg, mem;
27770 HOST_WIDE_INT offset;
27772 offset = (info->altivec_save_offset + frame_off
27773 + 16 * (i - info->first_altivec_reg_save));
27775 savereg = gen_rtx_REG (V4SImode, i);
27777 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27779 mem = gen_frame_mem (V4SImode,
27780 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27781 GEN_INT (offset)));
27782 insn = emit_insn (gen_rtx_SET (mem, savereg));
27783 areg = NULL_RTX;
27785 else
27787 NOT_INUSE (0);
27788 areg = gen_rtx_REG (Pmode, 0);
27789 emit_move_insn (areg, GEN_INT (offset));
27791 /* AltiVec addressing mode is [reg+reg]. */
27792 mem = gen_frame_mem (V4SImode,
27793 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27795 /* Rather than emitting a generic move, force use of the stvx
27796 instruction, which we always want on ISA 2.07 (power8) systems.
27797 In particular we don't want xxpermdi/stxvd2x for little
27798 endian. */
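/* The result is "stvx <vr>,<frame_reg>,0", i.e. [frame_reg + r0].  */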
27799 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27802 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27803 areg, GEN_INT (offset));
27807 /* VRSAVE is a bit vector representing which AltiVec registers
27808 are used. The OS uses this to determine which vector
27809 registers to save on a context switch. We need to save
27810 VRSAVE on the stack frame, add whatever AltiVec registers we
27811 used in this function, and do the corresponding magic in the
27812 epilogue. */
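/* A sketch of what emit_vrsave_prologue produces (the mask and offset
   are placeholders, not real values):

	mfvrsave 11			# get VRSAVE into a GPR
	stw 11,<vrsave_save_offset>(1)	# save the caller's mask
	oris 11,11,<mask>		# add the AltiVec regs used here
	mtvrsave 11			# install the combined mask
*/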
27814 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27816 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27817 be using r12 as frame_reg_rtx and r11 as the static chain
27818 pointer for nested functions. */
27819 int save_regno = 12;
27820 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27821 && !using_static_chain_p)
27822 save_regno = 11;
27823 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27825 save_regno = 11;
27826 if (using_static_chain_p)
27827 save_regno = 0;
27829 NOT_INUSE (save_regno);
27831 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27834 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27835 if (!TARGET_SINGLE_PIC_BASE
27836 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27837 && !constant_pool_empty_p ())
27838 || (DEFAULT_ABI == ABI_V4
27839 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27840 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27842 /* If emit_load_toc_table will use the link register, we need to save
27843 it. We use R12 for this purpose because emit_load_toc_table
27844 can use register 0. This allows us to use a plain 'blr' to return
27845 from the procedure more often. */
27846 int save_LR_around_toc_setup = (TARGET_ELF
27847 && DEFAULT_ABI == ABI_V4
27848 && flag_pic
27849 && ! info->lr_save_p
27850 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27851 if (save_LR_around_toc_setup)
27853 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27854 rtx tmp = gen_rtx_REG (Pmode, 12);
27856 sp_adjust = 0;
27857 insn = emit_move_insn (tmp, lr);
27858 RTX_FRAME_RELATED_P (insn) = 1;
27860 rs6000_emit_load_toc_table (TRUE);
27862 insn = emit_move_insn (lr, tmp);
27863 add_reg_note (insn, REG_CFA_RESTORE, lr);
27864 RTX_FRAME_RELATED_P (insn) = 1;
27866 else
27867 rs6000_emit_load_toc_table (TRUE);
27870 #if TARGET_MACHO
27871 if (!TARGET_SINGLE_PIC_BASE
27872 && DEFAULT_ABI == ABI_DARWIN
27873 && flag_pic && crtl->uses_pic_offset_table)
27875 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27876 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27878 /* Save and restore LR locally around this call (in R0). */
27879 if (!info->lr_save_p)
27880 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27882 emit_insn (gen_load_macho_picbase (src));
27884 emit_move_insn (gen_rtx_REG (Pmode,
27885 RS6000_PIC_OFFSET_TABLE_REGNUM),
27886 lr);
27888 if (!info->lr_save_p)
27889 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27891 #endif
27893 /* If we need to, save the TOC register after doing the stack setup.
27894 Do not emit eh frame info for this save. The unwinder wants info,
27895 conceptually attached to instructions in this function, about
27896 register values in the caller of this function. This R2 may have
27897 already been changed from the value in the caller.
27898 We don't attempt to write accurate DWARF EH frame info for R2
27899 because code emitted by gcc for a (non-pointer) function call
27900 doesn't save and restore R2. Instead, R2 is managed out-of-line
27901 by a linker generated plt call stub when the function resides in
27902 a shared library. This behavior is costly to describe in DWARF,
27903 both in terms of the size of DWARF info and the time taken in the
27904 unwinder to interpret it. R2 changes, apart from the
27905 calls_eh_return case earlier in this function, are handled by
27906 linux-unwind.h frob_update_context. */
27907 if (rs6000_save_toc_in_prologue_p ()
27908 && !cfun->machine->toc_is_wrapped_separately)
27910 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27911 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27914 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27915 if (using_split_stack && split_stack_arg_pointer_used_p ())
27916 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27919 /* Output .extern statements for the save/restore routines we use. */
27921 static void
27922 rs6000_output_savres_externs (FILE *file)
27924 rs6000_stack_t *info = rs6000_stack_info ();
27926 if (TARGET_DEBUG_STACK)
27927 debug_stack_info (info);
27929 /* Write .extern for any function we will call to save and restore
27930 fp values. */
27931 if (info->first_fp_reg_save < 64
27932 && !TARGET_MACHO
27933 && !TARGET_ELF)
27935 char *name;
27936 int regno = info->first_fp_reg_save - 32;
27938 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27940 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27941 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27942 name = rs6000_savres_routine_name (regno, sel);
27943 fprintf (file, "\t.extern %s\n", name);
27945 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27947 bool lr = (info->savres_strategy
27948 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27949 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27950 name = rs6000_savres_routine_name (regno, sel);
27951 fprintf (file, "\t.extern %s\n", name);
27956 /* Write function prologue. */
27958 static void
27959 rs6000_output_function_prologue (FILE *file)
27961 if (!cfun->is_thunk)
27962 rs6000_output_savres_externs (file);
27964 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27965 immediately after the global entry point label. */
27966 if (rs6000_global_entry_point_needed_p ())
27968 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27970 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27972 if (TARGET_CMODEL != CMODEL_LARGE)
27974 /* In the small and medium code models, we assume the TOC is less
27975 than 2 GB away from the text section, so it can be computed via
27976 the following two-instruction sequence. */
27977 char buf[256];
27979 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27980 fprintf (file, "0:\taddis 2,12,.TOC.-");
27981 assemble_name (file, buf);
27982 fprintf (file, "@ha\n");
27983 fprintf (file, "\taddi 2,2,.TOC.-");
27984 assemble_name (file, buf);
27985 fprintf (file, "@l\n");
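/* With the internal label substituted, the emitted sequence reads:

   .LCF0:
   0:	addis 2,12,.TOC.-.LCF0@ha
	addi 2,2,.TOC.-.LCF0@l

   (".LCF0" stands for whatever ASM_GENERATE_INTERNAL_LABEL picked.)  */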
27987 else
27989 /* In the large code model, we allow arbitrary offsets between the
27990 TOC and the text section, so we have to load the offset from
27991 memory. The data field is emitted directly before the global
27992 entry point in rs6000_elf_declare_function_name. */
27993 char buf[256];
27995 #ifdef HAVE_AS_ENTRY_MARKERS
27996 /* If supported by the linker, emit a marker relocation. If the
27997 total code size of the final executable or shared library
27998 happens to fit into 2 GB after all, the linker will replace
27999 this code sequence with the sequence for the small or medium
28000 code model. */
28001 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
28002 #endif
28003 fprintf (file, "\tld 2,");
28004 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
28005 assemble_name (file, buf);
28006 fprintf (file, "-");
28007 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
28008 assemble_name (file, buf);
28009 fprintf (file, "(12)\n");
28010 fprintf (file, "\tadd 2,2,12\n");
28013 fputs ("\t.localentry\t", file);
28014 assemble_name (file, name);
28015 fputs (",.-", file);
28016 assemble_name (file, name);
28017 fputs ("\n", file);
28020 /* Output -mprofile-kernel code. This needs to be done here instead of
28021 in output_function_profile since it must go after the ELFv2 ABI
28022 local entry point. */
28023 if (TARGET_PROFILE_KERNEL && crtl->profile)
28025 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
28026 gcc_assert (!TARGET_32BIT);
28028 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
28030 /* In the ELFv2 ABI we have no compiler stack word. It must be
28031 the responsibility of _mcount to preserve the static chain
28032 register if required. */
28033 if (DEFAULT_ABI != ABI_ELFv2
28034 && cfun->static_chain_decl != NULL)
28036 asm_fprintf (file, "\tstd %s,24(%s)\n",
28037 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
28038 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
28039 asm_fprintf (file, "\tld %s,24(%s)\n",
28040 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
28042 else
28043 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
28046 rs6000_pic_labelno++;
28049 /* -mprofile-kernel code calls mcount before the function prologue,
28050 so a profiled leaf function should stay a leaf function. */
28051 static bool
28052 rs6000_keep_leaf_when_profiled ()
28054 return TARGET_PROFILE_KERNEL;
28057 /* Non-zero if vmx regs are restored before the frame pop, zero if
28058 we restore after the pop when possible. */
28059 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
28061 /* Restoring cr is a two-step process: loading a reg from the frame
28062 save, then moving the reg to cr. For ABI_V4 we must let the
28063 unwinder know that the stack location is no longer valid at or
28064 before the stack deallocation, but we can't emit a cfa_restore for
28065 cr at the stack deallocation like we do for other registers.
28066 The trouble is that it is possible for the move to cr to be
28067 scheduled after the stack deallocation. So say exactly where cr
28068 is located on each of the two insns. */
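/* Concretely, the two insns look something like (the offset and the
   mtcrf field mask are illustrative):

	lwz 12,<cr_save_offset>(1)	# load_cr_save
	...
	mtcrf 0x38,12			# restore_saved_cr, CR2..CR4
*/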
28070 static rtx
28071 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
28073 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
28074 rtx reg = gen_rtx_REG (SImode, regno);
28075 rtx_insn *insn = emit_move_insn (reg, mem);
28077 if (!exit_func && DEFAULT_ABI == ABI_V4)
28079 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28080 rtx set = gen_rtx_SET (reg, cr);
28082 add_reg_note (insn, REG_CFA_REGISTER, set);
28083 RTX_FRAME_RELATED_P (insn) = 1;
28085 return reg;
28088 /* Reload CR from REG. */
28090 static void
28091 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
28093 int count = 0;
28094 int i;
28096 if (using_mfcr_multiple)
28098 for (i = 0; i < 8; i++)
28099 if (save_reg_p (CR0_REGNO + i))
28100 count++;
28101 gcc_assert (count);
28104 if (using_mfcr_multiple && count > 1)
28106 rtx_insn *insn;
28107 rtvec p;
28108 int ndx;
28110 p = rtvec_alloc (count);
28112 ndx = 0;
28113 for (i = 0; i < 8; i++)
28114 if (save_reg_p (CR0_REGNO + i))
28116 rtvec r = rtvec_alloc (2);
28117 RTVEC_ELT (r, 0) = reg;
28118 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
28119 RTVEC_ELT (p, ndx) =
28120 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
28121 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
28122 ndx++;
28124 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28125 gcc_assert (ndx == count);
28127 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28128 CR field separately. */
28129 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28131 for (i = 0; i < 8; i++)
28132 if (save_reg_p (CR0_REGNO + i))
28133 add_reg_note (insn, REG_CFA_RESTORE,
28134 gen_rtx_REG (SImode, CR0_REGNO + i));
28136 RTX_FRAME_RELATED_P (insn) = 1;
28139 else
28140 for (i = 0; i < 8; i++)
28141 if (save_reg_p (CR0_REGNO + i))
28143 rtx insn = emit_insn (gen_movsi_to_cr_one
28144 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28146 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28147 CR field separately, attached to the insn that in fact
28148 restores this particular CR field. */
28149 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28151 add_reg_note (insn, REG_CFA_RESTORE,
28152 gen_rtx_REG (SImode, CR0_REGNO + i));
28154 RTX_FRAME_RELATED_P (insn) = 1;
28158 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
28159 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
28160 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
28162 rtx_insn *insn = get_last_insn ();
28163 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28165 add_reg_note (insn, REG_CFA_RESTORE, cr);
28166 RTX_FRAME_RELATED_P (insn) = 1;
28170 /* Like cr, the move to lr instruction can be scheduled after the
28171 stack deallocation, but unlike cr, its stack frame save is still
28172 valid. So we only need to emit the cfa_restore on the correct
28173 instruction. */
28175 static void
28176 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
28178 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
28179 rtx reg = gen_rtx_REG (Pmode, regno);
28181 emit_move_insn (reg, mem);
28184 static void
28185 restore_saved_lr (int regno, bool exit_func)
28187 rtx reg = gen_rtx_REG (Pmode, regno);
28188 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
28189 rtx_insn *insn = emit_move_insn (lr, reg);
28191 if (!exit_func && flag_shrink_wrap)
28193 add_reg_note (insn, REG_CFA_RESTORE, lr);
28194 RTX_FRAME_RELATED_P (insn) = 1;
28198 static rtx
28199 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
28201 if (DEFAULT_ABI == ABI_ELFv2)
28203 int i;
28204 for (i = 0; i < 8; i++)
28205 if (save_reg_p (CR0_REGNO + i))
28207 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
28208 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
28209 cfa_restores);
28212 else if (info->cr_save_p)
28213 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28214 gen_rtx_REG (SImode, CR2_REGNO),
28215 cfa_restores);
28217 if (info->lr_save_p)
28218 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28219 gen_rtx_REG (Pmode, LR_REGNO),
28220 cfa_restores);
28221 return cfa_restores;
28224 /* Return true if OFFSET from the stack pointer can be clobbered by
28225 signals. V.4 doesn't have any stack cushion; the AIX ABIs keep 220
28226 or 288 bytes below the stack pointer safe from signals. */
28228 static inline bool
28229 offset_below_red_zone_p (HOST_WIDE_INT offset)
28231 return offset < (DEFAULT_ABI == ABI_V4
28232 ? 0
28233 : TARGET_32BIT ? -220 : -288);
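/* For example, under the 64-bit AIX/ELF ABIs the 288-byte red zone is
   exactly room for the 18 non-volatile GPRs plus the 18 non-volatile
   FPRs (2 * 18 * 8 bytes): a save slot at offset -144 is safe from
   signal handlers, while one at -296 is not.  */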
28236 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
28238 static void
28239 emit_cfa_restores (rtx cfa_restores)
28241 rtx_insn *insn = get_last_insn ();
28242 rtx *loc = &REG_NOTES (insn);
28244 while (*loc)
28245 loc = &XEXP (*loc, 1);
28246 *loc = cfa_restores;
28247 RTX_FRAME_RELATED_P (insn) = 1;
28250 /* Emit function epilogue as insns. */
28252 void
28253 rs6000_emit_epilogue (int sibcall)
28255 rs6000_stack_t *info;
28256 int restoring_GPRs_inline;
28257 int restoring_FPRs_inline;
28258 int using_load_multiple;
28259 int using_mtcr_multiple;
28260 int use_backchain_to_restore_sp;
28261 int restore_lr;
28262 int strategy;
28263 HOST_WIDE_INT frame_off = 0;
28264 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
28265 rtx frame_reg_rtx = sp_reg_rtx;
28266 rtx cfa_restores = NULL_RTX;
28267 rtx insn;
28268 rtx cr_save_reg = NULL_RTX;
28269 machine_mode reg_mode = Pmode;
28270 int reg_size = TARGET_32BIT ? 4 : 8;
28271 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
28272 ? DFmode : SFmode;
28273 int fp_reg_size = 8;
28274 int i;
28275 bool exit_func;
28276 unsigned ptr_regno;
28278 info = rs6000_stack_info ();
28280 strategy = info->savres_strategy;
28281 using_load_multiple = strategy & REST_MULTIPLE;
28282 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
28283 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
28284 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
28285 || rs6000_cpu == PROCESSOR_PPC603
28286 || rs6000_cpu == PROCESSOR_PPC750
28287 || optimize_size);
28288 /* Restore via the backchain when we have a large frame, since this
28289 is more efficient than an addis, addi pair. The second condition
28290 here will not trigger at the moment; we don't actually need a
28291 frame pointer for alloca, but the generic parts of the compiler
28292 give us one anyway. */
28293 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
28294 ? info->lr_save_offset
28295 : 0) > 32767
28296 || (cfun->calls_alloca
28297 && !frame_pointer_needed));
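/* I.e. a single "ld 11,0(1)" (or "lwz" on 32-bit) reloads the
   caller's stack pointer from the backchain word, where rebuilding
   a frame size above 32K would take an addis/addi pair.  */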
28298 restore_lr = (info->lr_save_p
28299 && (restoring_FPRs_inline
28300 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
28301 && (restoring_GPRs_inline
28302 || info->first_fp_reg_save < 64)
28303 && !cfun->machine->lr_is_wrapped_separately);
28306 if (WORLD_SAVE_P (info))
28308 int i, j;
28309 char rname[30];
28310 const char *alloc_rname;
28311 rtvec p;
28313 /* eh_rest_world_r10 will return to the location saved in the LR
28314 stack slot (which is not likely to be our caller).
28315 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
28316 rest_world is similar, except any R10 parameter is ignored.
28317 The exception-handling stuff that was here in 2.95 is no
28318 longer necessary. */
28320 p = rtvec_alloc (9
28321 + 32 - info->first_gp_reg_save
28322 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
28323 + 63 + 1 - info->first_fp_reg_save);
28325 strcpy (rname, ((crtl->calls_eh_return) ?
28326 "*eh_rest_world_r10" : "*rest_world"));
28327 alloc_rname = ggc_strdup (rname);
28329 j = 0;
28330 RTVEC_ELT (p, j++) = ret_rtx;
28331 RTVEC_ELT (p, j++)
28332 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
28333 /* The instruction pattern requires a clobber here;
28334 it is shared with the restVEC helper. */
28335 RTVEC_ELT (p, j++)
28336 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
28339 /* CR register traditionally saved as CR2. */
28340 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
28341 RTVEC_ELT (p, j++)
28342 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28343 if (flag_shrink_wrap)
28345 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28346 gen_rtx_REG (Pmode, LR_REGNO),
28347 cfa_restores);
28348 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28352 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28354 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28355 RTVEC_ELT (p, j++)
28356 = gen_frame_load (reg,
28357 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28358 if (flag_shrink_wrap
28359 && save_reg_p (info->first_gp_reg_save + i))
28360 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28362 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28364 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28365 RTVEC_ELT (p, j++)
28366 = gen_frame_load (reg,
28367 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28368 if (flag_shrink_wrap
28369 && save_reg_p (info->first_altivec_reg_save + i))
28370 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28372 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28374 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
28375 ? DFmode : SFmode),
28376 info->first_fp_reg_save + i);
28377 RTVEC_ELT (p, j++)
28378 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28379 if (flag_shrink_wrap
28380 && save_reg_p (info->first_fp_reg_save + i))
28381 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28383 RTVEC_ELT (p, j++)
28384 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
28385 RTVEC_ELT (p, j++)
28386 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
28387 RTVEC_ELT (p, j++)
28388 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
28389 RTVEC_ELT (p, j++)
28390 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
28391 RTVEC_ELT (p, j++)
28392 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28393 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28395 if (flag_shrink_wrap)
28397 REG_NOTES (insn) = cfa_restores;
28398 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28399 RTX_FRAME_RELATED_P (insn) = 1;
28401 return;
28404 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28405 if (info->push_p)
28406 frame_off = info->total_size;
28408 /* Restore AltiVec registers if we must do so before adjusting the
28409 stack. */
28410 if (info->altivec_size != 0
28411 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28412 || (DEFAULT_ABI != ABI_V4
28413 && offset_below_red_zone_p (info->altivec_save_offset))))
28415 int i;
28416 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28418 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28419 if (use_backchain_to_restore_sp)
28421 int frame_regno = 11;
28423 if ((strategy & REST_INLINE_VRS) == 0)
28425 /* Of r11 and r12, select the one not clobbered by an
28426 out-of-line restore function for the frame register. */
28427 frame_regno = 11 + 12 - scratch_regno;
28429 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28430 emit_move_insn (frame_reg_rtx,
28431 gen_rtx_MEM (Pmode, sp_reg_rtx));
28432 frame_off = 0;
28434 else if (frame_pointer_needed)
28435 frame_reg_rtx = hard_frame_pointer_rtx;
28437 if ((strategy & REST_INLINE_VRS) == 0)
28439 int end_save = info->altivec_save_offset + info->altivec_size;
28440 int ptr_off;
28441 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28442 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28444 if (end_save + frame_off != 0)
28446 rtx offset = GEN_INT (end_save + frame_off);
28448 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28450 else
28451 emit_move_insn (ptr_reg, frame_reg_rtx);
28453 ptr_off = -end_save;
28454 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28455 info->altivec_save_offset + ptr_off,
28456 0, V4SImode, SAVRES_VR);
28458 else
28460 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28461 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28463 rtx addr, areg, mem, insn;
28464 rtx reg = gen_rtx_REG (V4SImode, i);
28465 HOST_WIDE_INT offset
28466 = (info->altivec_save_offset + frame_off
28467 + 16 * (i - info->first_altivec_reg_save));
28469 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28471 mem = gen_frame_mem (V4SImode,
28472 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28473 GEN_INT (offset)));
28474 insn = gen_rtx_SET (reg, mem);
28476 else
28478 areg = gen_rtx_REG (Pmode, 0);
28479 emit_move_insn (areg, GEN_INT (offset));
28481 /* AltiVec addressing mode is [reg+reg]. */
28482 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28483 mem = gen_frame_mem (V4SImode, addr);
28485 /* Rather than emitting a generic move, force use of the
28486 lvx instruction, which we always want. In particular we
28487 don't want lxvd2x/xxpermdi for little endian. */
28488 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28491 (void) emit_insn (insn);
28495 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28496 if (((strategy & REST_INLINE_VRS) == 0
28497 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28498 && (flag_shrink_wrap
28499 || (offset_below_red_zone_p
28500 (info->altivec_save_offset
28501 + 16 * (i - info->first_altivec_reg_save))))
28502 && save_reg_p (i))
28504 rtx reg = gen_rtx_REG (V4SImode, i);
28505 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28509 /* Restore VRSAVE if we must do so before adjusting the stack. */
28510 if (info->vrsave_size != 0
28511 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28512 || (DEFAULT_ABI != ABI_V4
28513 && offset_below_red_zone_p (info->vrsave_save_offset))))
28515 rtx reg;
28517 if (frame_reg_rtx == sp_reg_rtx)
28519 if (use_backchain_to_restore_sp)
28521 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28522 emit_move_insn (frame_reg_rtx,
28523 gen_rtx_MEM (Pmode, sp_reg_rtx));
28524 frame_off = 0;
28526 else if (frame_pointer_needed)
28527 frame_reg_rtx = hard_frame_pointer_rtx;
28530 reg = gen_rtx_REG (SImode, 12);
28531 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28532 info->vrsave_save_offset + frame_off));
28534 emit_insn (generate_set_vrsave (reg, info, 1));
28537 insn = NULL_RTX;
28538 /* If we have a large stack frame, restore the old stack pointer
28539 using the backchain. */
28540 if (use_backchain_to_restore_sp)
28542 if (frame_reg_rtx == sp_reg_rtx)
28544 /* Under V.4, don't reset the stack pointer until after we're done
28545 loading the saved registers. */
28546 if (DEFAULT_ABI == ABI_V4)
28547 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28549 insn = emit_move_insn (frame_reg_rtx,
28550 gen_rtx_MEM (Pmode, sp_reg_rtx));
28551 frame_off = 0;
28553 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28554 && DEFAULT_ABI == ABI_V4)
28555 /* frame_reg_rtx has been set up by the altivec restore. */
28557 else
28559 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28560 frame_reg_rtx = sp_reg_rtx;
28563 /* If we have a frame pointer, we can restore the old stack pointer
28564 from it. */
28565 else if (frame_pointer_needed)
28567 frame_reg_rtx = sp_reg_rtx;
28568 if (DEFAULT_ABI == ABI_V4)
28569 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28570 /* Prevent reordering memory accesses against stack pointer restore. */
28571 else if (cfun->calls_alloca
28572 || offset_below_red_zone_p (-info->total_size))
28573 rs6000_emit_stack_tie (frame_reg_rtx, true);
28575 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28576 GEN_INT (info->total_size)));
28577 frame_off = 0;
28579 else if (info->push_p
28580 && DEFAULT_ABI != ABI_V4
28581 && !crtl->calls_eh_return)
28583 /* Prevent reordering memory accesses against stack pointer restore. */
28584 if (cfun->calls_alloca
28585 || offset_below_red_zone_p (-info->total_size))
28586 rs6000_emit_stack_tie (frame_reg_rtx, false);
28587 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28588 GEN_INT (info->total_size)));
28589 frame_off = 0;
28591 if (insn && frame_reg_rtx == sp_reg_rtx)
28593 if (cfa_restores)
28595 REG_NOTES (insn) = cfa_restores;
28596 cfa_restores = NULL_RTX;
28598 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28599 RTX_FRAME_RELATED_P (insn) = 1;
28602 /* Restore AltiVec registers if we have not done so already. */
28603 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28604 && info->altivec_size != 0
28605 && (DEFAULT_ABI == ABI_V4
28606 || !offset_below_red_zone_p (info->altivec_save_offset)))
28608 int i;
28610 if ((strategy & REST_INLINE_VRS) == 0)
28612 int end_save = info->altivec_save_offset + info->altivec_size;
28613 int ptr_off;
28614 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28615 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28616 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28618 if (end_save + frame_off != 0)
28620 rtx offset = GEN_INT (end_save + frame_off);
28622 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28624 else
28625 emit_move_insn (ptr_reg, frame_reg_rtx);
28627 ptr_off = -end_save;
28628 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28629 info->altivec_save_offset + ptr_off,
28630 0, V4SImode, SAVRES_VR);
28631 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28633 /* Frame reg was clobbered by out-of-line save. Restore it
28634 from ptr_reg, and if we are calling out-of-line gpr or
28635 fpr restore, set up the correct pointer and offset. */
28636 unsigned newptr_regno = 1;
28637 if (!restoring_GPRs_inline)
28639 bool lr = info->gp_save_offset + info->gp_size == 0;
28640 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28641 newptr_regno = ptr_regno_for_savres (sel);
28642 end_save = info->gp_save_offset + info->gp_size;
28644 else if (!restoring_FPRs_inline)
28646 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28647 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28648 newptr_regno = ptr_regno_for_savres (sel);
28649 end_save = info->fp_save_offset + info->fp_size;
28652 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28653 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28655 if (end_save + ptr_off != 0)
28657 rtx offset = GEN_INT (end_save + ptr_off);
28659 frame_off = -end_save;
28660 if (TARGET_32BIT)
28661 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28662 ptr_reg, offset));
28663 else
28664 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28665 ptr_reg, offset));
28667 else
28669 frame_off = ptr_off;
28670 emit_move_insn (frame_reg_rtx, ptr_reg);
28674 else
28676 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28677 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28679 rtx addr, areg, mem, insn;
28680 rtx reg = gen_rtx_REG (V4SImode, i);
28681 HOST_WIDE_INT offset
28682 = (info->altivec_save_offset + frame_off
28683 + 16 * (i - info->first_altivec_reg_save));
28685 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28687 mem = gen_frame_mem (V4SImode,
28688 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28689 GEN_INT (offset)));
28690 insn = gen_rtx_SET (reg, mem);
28692 else
28694 areg = gen_rtx_REG (Pmode, 0);
28695 emit_move_insn (areg, GEN_INT (offset));
28697 /* AltiVec addressing mode is [reg+reg]. */
28698 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28699 mem = gen_frame_mem (V4SImode, addr);
28701 /* Rather than emitting a generic move, force use of the
28702 lvx instruction, which we always want. In particular we
28703 don't want lxvd2x/xxpermdi for little endian. */
28704 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28707 (void) emit_insn (insn);
28711 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28712 if (((strategy & REST_INLINE_VRS) == 0
28713 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28714 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28715 && save_reg_p (i))
28717 rtx reg = gen_rtx_REG (V4SImode, i);
28718 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28722 /* Restore VRSAVE if we have not done so already. */
28723 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28724 && info->vrsave_size != 0
28725 && (DEFAULT_ABI == ABI_V4
28726 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28728 rtx reg;
28730 reg = gen_rtx_REG (SImode, 12);
28731 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28732 info->vrsave_save_offset + frame_off));
28734 emit_insn (generate_set_vrsave (reg, info, 1));
28737 /* If we exit by an out-of-line restore function on ABI_V4 then that
28738 function will deallocate the stack, so we don't need to worry
28739 about the unwinder restoring cr from an invalid stack frame
28740 location. */
28741 exit_func = (!restoring_FPRs_inline
28742 || (!restoring_GPRs_inline
28743 && info->first_fp_reg_save == 64));
28745 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28746 *separate* slots if the routine calls __builtin_eh_return, so
28747 that they can be independently restored by the unwinder. */
28748 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28750 int i, cr_off = info->ehcr_offset;
28752 for (i = 0; i < 8; i++)
28753 if (!call_used_regs[CR0_REGNO + i])
28755 rtx reg = gen_rtx_REG (SImode, 0);
28756 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28757 cr_off + frame_off));
28759 insn = emit_insn (gen_movsi_to_cr_one
28760 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28762 if (!exit_func && flag_shrink_wrap)
28764 add_reg_note (insn, REG_CFA_RESTORE,
28765 gen_rtx_REG (SImode, CR0_REGNO + i));
28767 RTX_FRAME_RELATED_P (insn) = 1;
28770 cr_off += reg_size;
28774 /* Get the old lr if we saved it. If we are restoring registers
28775 out-of-line, then the out-of-line routines can do this for us. */
28776 if (restore_lr && restoring_GPRs_inline)
28777 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28779 /* Get the old cr if we saved it. */
28780 if (info->cr_save_p)
28782 unsigned cr_save_regno = 12;
28784 if (!restoring_GPRs_inline)
28786 /* Ensure we don't use the register used by the out-of-line
28787 gpr register restore below. */
28788 bool lr = info->gp_save_offset + info->gp_size == 0;
28789 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28790 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28792 if (gpr_ptr_regno == 12)
28793 cr_save_regno = 11;
28794 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28796 else if (REGNO (frame_reg_rtx) == 12)
28797 cr_save_regno = 11;
28799 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28800 info->cr_save_offset + frame_off,
28801 exit_func);
28804 /* Set LR here to try to overlap restores below. */
28805 if (restore_lr && restoring_GPRs_inline)
28806 restore_saved_lr (0, exit_func);
28808 /* Load exception handler data registers, if needed. */
28809 if (crtl->calls_eh_return)
28811 unsigned int i, regno;
28813 if (TARGET_AIX)
28815 rtx reg = gen_rtx_REG (reg_mode, 2);
28816 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28817 frame_off + RS6000_TOC_SAVE_SLOT));
28820 for (i = 0; ; ++i)
28822 rtx mem;
28824 regno = EH_RETURN_DATA_REGNO (i);
28825 if (regno == INVALID_REGNUM)
28826 break;
28828 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28829 info->ehrd_offset + frame_off
28830 + reg_size * (int) i);
28832 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28836 /* Restore GPRs. This is done as a PARALLEL if we are using
28837 the load-multiple instructions. */
28838 if (!restoring_GPRs_inline)
28840 /* We are jumping to an out-of-line function. */
28841 rtx ptr_reg;
28842 int end_save = info->gp_save_offset + info->gp_size;
28843 bool can_use_exit = end_save == 0;
28844 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28845 int ptr_off;
28847 /* Emit stack reset code if we need it. */
28848 ptr_regno = ptr_regno_for_savres (sel);
28849 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28850 if (can_use_exit)
28851 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28852 else if (end_save + frame_off != 0)
28853 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28854 GEN_INT (end_save + frame_off)));
28855 else if (REGNO (frame_reg_rtx) != ptr_regno)
28856 emit_move_insn (ptr_reg, frame_reg_rtx);
28857 if (REGNO (frame_reg_rtx) == ptr_regno)
28858 frame_off = -end_save;
28860 if (can_use_exit && info->cr_save_p)
28861 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28863 ptr_off = -end_save;
28864 rs6000_emit_savres_rtx (info, ptr_reg,
28865 info->gp_save_offset + ptr_off,
28866 info->lr_save_offset + ptr_off,
28867 reg_mode, sel);
28869 else if (using_load_multiple)
28871 rtvec p;
28872 p = rtvec_alloc (32 - info->first_gp_reg_save);
28873 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28874 RTVEC_ELT (p, i)
28875 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28876 frame_reg_rtx,
28877 info->gp_save_offset + frame_off + reg_size * i);
28878 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28880 else
28882 int offset = info->gp_save_offset + frame_off;
28883 for (i = info->first_gp_reg_save; i < 32; i++)
28885 if (save_reg_p (i)
28886 && !cfun->machine->gpr_is_wrapped_separately[i])
28888 rtx reg = gen_rtx_REG (reg_mode, i);
28889 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28892 offset += reg_size;
28896 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28898 /* If the frame pointer was used then we can't delay emitting
28899 a REG_CFA_DEF_CFA note. This must happen on the insn that
28900 restores the frame pointer, r31. We may have already emitted
28901 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28902 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28903 be harmless if emitted. */
28904 if (frame_pointer_needed)
28906 insn = get_last_insn ();
28907 add_reg_note (insn, REG_CFA_DEF_CFA,
28908 plus_constant (Pmode, frame_reg_rtx, frame_off));
28909 RTX_FRAME_RELATED_P (insn) = 1;
28912 /* Set up cfa_restores. We always need these when
28913 shrink-wrapping. If not shrink-wrapping then we only need
28914 the cfa_restore when the stack location is no longer valid.
28915 The cfa_restores must be emitted on or before the insn that
28916 invalidates the stack, and of course must not be emitted
28917 before the insn that actually does the restore. The latter
28918 is why it is a bad idea to emit the cfa_restores as a group
28919 on the last instruction here that actually does a restore:
28920 That insn may be reordered with respect to others doing
28921 restores. */
28922 if (flag_shrink_wrap
28923 && !restoring_GPRs_inline
28924 && info->first_fp_reg_save == 64)
28925 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28927 for (i = info->first_gp_reg_save; i < 32; i++)
28928 if (save_reg_p (i)
28929 && !cfun->machine->gpr_is_wrapped_separately[i])
28931 rtx reg = gen_rtx_REG (reg_mode, i);
28932 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28936 if (!restoring_GPRs_inline
28937 && info->first_fp_reg_save == 64)
28939 /* We are jumping to an out-of-line function. */
28940 if (cfa_restores)
28941 emit_cfa_restores (cfa_restores);
28942 return;
28945 if (restore_lr && !restoring_GPRs_inline)
28947 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28948 restore_saved_lr (0, exit_func);
28951 /* Restore fpr's if we need to do it without calling a function. */
28952 if (restoring_FPRs_inline)
28954 int offset = info->fp_save_offset + frame_off;
28955 for (i = info->first_fp_reg_save; i < 64; i++)
28957 if (save_reg_p (i)
28958 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28960 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28961 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28962 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28963 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28964 cfa_restores);
28967 offset += fp_reg_size;
28971 /* If we saved cr, restore it here. Just those that were used. */
28972 if (info->cr_save_p)
28973 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28975 /* If this is V.4, unwind the stack pointer after all of the loads
28976 have been done, or set up r11 if we are restoring fp out of line. */
28977 ptr_regno = 1;
28978 if (!restoring_FPRs_inline)
28980 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28981 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28982 ptr_regno = ptr_regno_for_savres (sel);
28985 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28986 if (REGNO (frame_reg_rtx) == ptr_regno)
28987 frame_off = 0;
28989 if (insn && restoring_FPRs_inline)
28991 if (cfa_restores)
28993 REG_NOTES (insn) = cfa_restores;
28994 cfa_restores = NULL_RTX;
28996 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28997 RTX_FRAME_RELATED_P (insn) = 1;
29000 if (crtl->calls_eh_return)
29002 rtx sa = EH_RETURN_STACKADJ_RTX;
29003 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
29006 if (!sibcall && restoring_FPRs_inline)
29008 if (cfa_restores)
29010 /* We can't hang the cfa_restores off a simple return,
29011 since the shrink-wrap code sometimes uses an existing
29012 return. This means there might be a path from
29013 pre-prologue code to this return, and dwarf2cfi code
29014 wants the eh_frame unwinder state to be the same on
29015 all paths to any point. So we need to emit the
29016 cfa_restores before the return. For -m64 we really
29017 don't need epilogue cfa_restores at all, except for
29018 this irritating dwarf2cfi-with-shrink-wrap
29019 requirement; the stack red-zone means eh_frame info
29020 from the prologue telling the unwinder to restore
29021 from the stack is perfectly good right to the end of
29022 the function. */
29023 emit_insn (gen_blockage ());
29024 emit_cfa_restores (cfa_restores);
29025 cfa_restores = NULL_RTX;
29028 emit_jump_insn (targetm.gen_simple_return ());
29031 if (!sibcall && !restoring_FPRs_inline)
29033 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
29034 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
29035 int elt = 0;
29036 RTVEC_ELT (p, elt++) = ret_rtx;
29037 if (lr)
29038 RTVEC_ELT (p, elt++)
29039 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
29041 /* We have to restore more than two FP registers, so branch to the
29042 restore function. It will return to our caller. */
29043 int i;
29044 int reg;
29045 rtx sym;
29047 if (flag_shrink_wrap)
29048 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
29050 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
29051 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
29052 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) ? 1 : 11;
29053 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
29055 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
29057 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
29059 RTVEC_ELT (p, elt++)
29060 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
29061 if (flag_shrink_wrap
29062 && save_reg_p (info->first_fp_reg_save + i))
29063 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
29066 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
29069 if (cfa_restores)
29071 if (sibcall)
29072 /* Ensure the cfa_restores are hung off an insn that won't
29073 be reordered above other restores. */
29074 emit_insn (gen_blockage ());
29076 emit_cfa_restores (cfa_restores);
29080 /* Write function epilogue. */
29082 static void
29083 rs6000_output_function_epilogue (FILE *file)
29085 #if TARGET_MACHO
29086 macho_branch_islands ();
29089 rtx_insn *insn = get_last_insn ();
29090 rtx_insn *deleted_debug_label = NULL;
29092 /* Mach-O doesn't support labels at the end of objects, so if
29093 it looks like we might want one, take special action.
29095 First, collect any sequence of deleted debug labels. */
29096 while (insn
29097 && NOTE_P (insn)
29098 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
29100 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes only, don't insert
29101 a nop; instead set their CODE_LABEL_NUMBER to -1, since
29102 otherwise there would be code generation differences
29103 between -g and -g0. */
29104 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29105 deleted_debug_label = insn;
29106 insn = PREV_INSN (insn);
29109 /* Second, if we have:
29110 label:
29111 barrier
29112 then this needs to be detected, so skip past the barrier. */
29114 if (insn && BARRIER_P (insn))
29115 insn = PREV_INSN (insn);
29117 /* Up to now we've only seen notes or barriers. */
29118 if (insn)
29120 if (LABEL_P (insn)
29121 || (NOTE_P (insn)
29122 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
29123 /* Trailing label: <barrier>. */
29124 fputs ("\tnop\n", file);
29125 else
29127 /* Lastly, see if we have a completely empty function body. */
29128 while (insn && ! INSN_P (insn))
29129 insn = PREV_INSN (insn);
29130 /* If we don't find any insns, we've got an empty function body,
29131 i.e. completely empty, without even a return or branch. This is
29132 taken as the case where the function body has been removed
29133 because it contained an inline __builtin_unreachable(). GCC
29134 treats reaching __builtin_unreachable() as undefined behavior,
29135 so we're not obliged to do anything special; however, we want
29136 non-zero-sized function bodies. To meet this, and to help the
29137 user out, trap the case. */
29138 if (insn == NULL)
29139 fputs ("\ttrap\n", file);
29142 else if (deleted_debug_label)
29143 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
29144 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29145 CODE_LABEL_NUMBER (insn) = -1;
29147 #endif
29149 /* Output a traceback table here. See /usr/include/sys/debug.h for info
29150 on its format.
29152 We don't output a traceback table if -finhibit-size-directive was
29153 used. The documentation for -finhibit-size-directive reads
29154 ``don't output a @code{.size} assembler directive, or anything
29155 else that would cause trouble if the function is split in the
29156 middle, and the two halves are placed at locations far apart in
29157 memory.'' The traceback table has this property, since it
29158 includes the offset from the start of the function to the
29159 traceback table itself.
29161 System V.4 PowerPC targets (and the embedded ABI derived from
29162 them) use a different traceback table. */
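/* For illustration only (the exact bytes depend on the flag
   computations below): a small optimized function "foo" (a made-up
   name) that pushes a frame and saves only the link register would
   get a table along the lines of

	LT..foo:
		.long 0                    start-of-tbtab marker
		.byte 0,0,32,65,128,0,0,0  fixed fields
		.long LT..foo-.foo         offset from function start
		.short 3
		.byte "foo"
		.align 2

   where 65 records the saved LR and 128 the pushed frame.  */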
29163 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29164 && ! flag_inhibit_size_directive
29165 && rs6000_traceback != traceback_none && !cfun->is_thunk)
29167 const char *fname = NULL;
29168 const char *language_string = lang_hooks.name;
29169 int fixed_parms = 0, float_parms = 0, parm_info = 0;
29170 int i;
29171 int optional_tbtab;
29172 rs6000_stack_t *info = rs6000_stack_info ();
29174 if (rs6000_traceback == traceback_full)
29175 optional_tbtab = 1;
29176 else if (rs6000_traceback == traceback_part)
29177 optional_tbtab = 0;
29178 else
29179 optional_tbtab = !optimize_size && !TARGET_ELF;
29181 if (optional_tbtab)
29183 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
29184 while (*fname == '.') /* V.4 encodes . in the name */
29185 fname++;
29187 /* Need label immediately before tbtab, so we can compute
29188 its offset from the function start. */
29189 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29190 ASM_OUTPUT_LABEL (file, fname);
29193 /* The .tbtab pseudo-op can only be used for the first eight
29194 expressions, since it can't handle the possibly variable
29195 length fields that follow. However, if you omit the optional
29196 fields, the assembler outputs zeros for all optional fields
29197 anyway, giving each variable-length field its minimum length
29198 (as defined in sys/debug.h). Thus we cannot use the .tbtab
29199 pseudo-op at all. */
29201 /* An all-zero word flags the start of the tbtab, for debuggers
29202 that have to find it by searching forward from the entry
29203 point or from the current pc. */
29204 fputs ("\t.long 0\n", file);
29206 /* Tbtab format type. Use format type 0. */
29207 fputs ("\t.byte 0,", file);
29209 /* Language type. Unfortunately, there does not seem to be any
29210 official way to discover the language being compiled, so we
29211 use language_string.
29212 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
29213 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
29214 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
29215 either, so for now use 0. */
29216 if (lang_GNU_C ()
29217 || ! strcmp (language_string, "GNU GIMPLE")
29218 || ! strcmp (language_string, "GNU Go")
29219 || ! strcmp (language_string, "libgccjit"))
29220 i = 0;
29221 else if (! strcmp (language_string, "GNU F77")
29222 || lang_GNU_Fortran ())
29223 i = 1;
29224 else if (! strcmp (language_string, "GNU Pascal"))
29225 i = 2;
29226 else if (! strcmp (language_string, "GNU Ada"))
29227 i = 3;
29228 else if (lang_GNU_CXX ()
29229 || ! strcmp (language_string, "GNU Objective-C++"))
29230 i = 9;
29231 else if (! strcmp (language_string, "GNU Java"))
29232 i = 13;
29233 else if (! strcmp (language_string, "GNU Objective-C"))
29234 i = 14;
29235 else
29236 gcc_unreachable ();
29237 fprintf (file, "%d,", i);
29239 /* 8 single bit fields: global linkage (not set for C extern linkage,
29240 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
29241 from start of procedure stored in tbtab, internal function, function
29242 has controlled storage, function has no toc, function uses fp,
29243 function logs/aborts fp operations. */
29244 /* Assume that fp operations are used if any fp reg must be saved. */
29245 fprintf (file, "%d,",
29246 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
29248 /* 6 bitfields: function is interrupt handler, name present in
29249 proc table, function calls alloca, on condition directives
29250 (controls stack walks, 3 bits), saves condition reg, saves
29251 link reg. */
29252 /* The `function calls alloca' bit seems to be set whenever reg 31 is
29253 set up as a frame pointer, even when there is no alloca call. */
29254 fprintf (file, "%d,",
29255 ((optional_tbtab << 6)
29256 | ((optional_tbtab & frame_pointer_needed) << 5)
29257 | (info->cr_save_p << 1)
29258 | (info->lr_save_p)));
29260 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
29261 (6 bits). */
29262 fprintf (file, "%d,",
29263 (info->push_p << 7) | (64 - info->first_fp_reg_save));
29265 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
29266 fprintf (file, "%d,", (32 - first_reg_to_save ()));
29268 if (optional_tbtab)
29270 /* Compute the parameter info from the function decl argument
29271 list. */
29272 tree decl;
29273 int next_parm_info_bit = 31;
29275 for (decl = DECL_ARGUMENTS (current_function_decl);
29276 decl; decl = DECL_CHAIN (decl))
29278 rtx parameter = DECL_INCOMING_RTL (decl);
29279 machine_mode mode = GET_MODE (parameter);
29281 if (GET_CODE (parameter) == REG)
29283 if (SCALAR_FLOAT_MODE_P (mode))
29285 int bits;
29287 float_parms++;
29289 switch (mode)
29291 case E_SFmode:
29292 case E_SDmode:
29293 bits = 0x2;
29294 break;
29296 case E_DFmode:
29297 case E_DDmode:
29298 case E_TFmode:
29299 case E_TDmode:
29300 case E_IFmode:
29301 case E_KFmode:
29302 bits = 0x3;
29303 break;
29305 default:
29306 gcc_unreachable ();
29309 /* If only one bit will fit, don't or in this entry. */
29310 if (next_parm_info_bit > 0)
29311 parm_info |= (bits << (next_parm_info_bit - 1));
29312 next_parm_info_bit -= 2;
29314 else
29316 fixed_parms += ((GET_MODE_SIZE (mode)
29317 + (UNITS_PER_WORD - 1))
29318 / UNITS_PER_WORD);
29319 next_parm_info_bit -= 1;
29325 /* Number of fixed point parameters. */
29326 /* This is actually the number of words of fixed point parameters;
29327 thus an 8-byte struct counts as 2, and the maximum value is 8. */
29328 fprintf (file, "%d,", fixed_parms);
29330 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29331 all on stack. */
29332 /* This is actually the number of fp registers that hold parameters;
29333 and thus the maximum value is 13. */
29334 /* Set parameters on stack bit if parameters are not in their original
29335 registers, regardless of whether they are on the stack? Xlc
29336 seems to set the bit when not optimizing. */
29337 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
29339 if (optional_tbtab)
29341 /* Optional fields follow. Some are variable length. */
29343 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29344 float, 11 double float. */
29345 /* There is an entry for each parameter in a register, in the order
29346 that they occur in the parameter list. Any intervening arguments
29347 on the stack are ignored. If the list overflows a long (max
29348 possible length 34 bits) then completely leave off all elements
29349 that don't fit. */
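/* For example, a function whose register parameters are (int, double,
   float) would give fixed_parms == 1, float_parms == 2 and
   parm_info == 0x70000000: bit 31 stays 0 for the int word, bits
   30-29 become 11 for the double, and bits 28-27 become 10 for the
   float.  */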
29350 /* Only emit this long if there was at least one parameter. */
29351 if (fixed_parms || float_parms)
29352 fprintf (file, "\t.long %d\n", parm_info);
29354 /* Offset from start of code to tb table. */
29355 fputs ("\t.long ", file);
29356 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29357 RS6000_OUTPUT_BASENAME (file, fname);
29358 putc ('-', file);
29359 rs6000_output_function_entry (file, fname);
29360 putc ('\n', file);
29362 /* Interrupt handler mask. */
29363 /* Omit this long, since we never set the interrupt handler bit
29364 above. */
29366 /* Number of CTL (controlled storage) anchors. */
29367 /* Omit this long, since the has_ctl bit is never set above. */
29369 /* Displacement into stack of each CTL anchor. */
29370 /* Omit this list of longs, because there are no CTL anchors. */
29372 /* Length of function name. */
29373 if (*fname == '*')
29374 ++fname;
29375 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29377 /* Function name. */
29378 assemble_string (fname, strlen (fname));
29380 /* Register for alloca automatic storage; this is always reg 31.
29381 Only emit this if the alloca bit was set above. */
29382 if (frame_pointer_needed)
29383 fputs ("\t.byte 31\n", file);
29385 fputs ("\t.align 2\n", file);
29389 /* Arrange to define .LCTOC1 label, if not already done. */
29390 if (need_toc_init)
29392 need_toc_init = 0;
29393 if (!toc_initialized)
29395 switch_to_section (toc_section);
29396 switch_to_section (current_function_section ());
29401 /* -fsplit-stack support. */
29403 /* A SYMBOL_REF for __morestack. */
29404 static GTY(()) rtx morestack_ref;
29406 static rtx
29407 gen_add3_const (rtx rt, rtx ra, long c)
29409 if (TARGET_64BIT)
29410 return gen_adddi3 (rt, ra, GEN_INT (c));
29411 else
29412 return gen_addsi3 (rt, ra, GEN_INT (c));
29415 /* Emit -fsplit-stack prologue, which goes before the regular function
29416 prologue (at local entry point in the case of ELFv2). */
29418 void
29419 rs6000_expand_split_stack_prologue (void)
29421 rs6000_stack_t *info = rs6000_stack_info ();
29422 unsigned HOST_WIDE_INT allocate;
29423 long alloc_hi, alloc_lo;
29424 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29425 rtx_insn *insn;
29427 gcc_assert (flag_split_stack && reload_completed);
29429 if (!info->push_p)
29430 return;
29432 if (global_regs[29])
29434 error ("%qs uses register r29", "-fsplit-stack");
29435 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29436 "conflicts with %qD", global_regs_decl[29]);
29439 allocate = info->total_size;
29440 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29442 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
29443 return;
29445 if (morestack_ref == NULL_RTX)
29447 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29448 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29449 | SYMBOL_FLAG_FUNCTION);
29452 r0 = gen_rtx_REG (Pmode, 0);
29453 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29454 r12 = gen_rtx_REG (Pmode, 12);
29455 emit_insn (gen_load_split_stack_limit (r0));
29456 /* Always emit two insns here to calculate the requested stack,
29457 so that the linker can edit them when adjusting size for calling
29458 non-split-stack code. */
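/* For instance (register numbers as used below, frame sizes made up):
   a 64 KiB frame gives alloc_hi == -0x10000 and alloc_lo == 0, so the
   pair is

	addis 12,1,-1
	nop

   while a 0x120-byte frame gives alloc_hi == 0, alloc_lo == -0x120:

	addi 12,1,-288
	nop  */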
29459 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29460 alloc_lo = -allocate - alloc_hi;
29461 if (alloc_hi != 0)
29463 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29464 if (alloc_lo != 0)
29465 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29466 else
29467 emit_insn (gen_nop ());
29469 else
29471 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29472 emit_insn (gen_nop ());
29475 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29476 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29477 ok_label = gen_label_rtx ();
29478 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29479 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29480 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29481 pc_rtx);
29482 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29483 JUMP_LABEL (insn) = ok_label;
29484 /* Mark the jump as very likely to be taken. */
29485 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29487 lr = gen_rtx_REG (Pmode, LR_REGNO);
29488 insn = emit_move_insn (r0, lr);
29489 RTX_FRAME_RELATED_P (insn) = 1;
29490 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29491 RTX_FRAME_RELATED_P (insn) = 1;
29493 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29494 const0_rtx, const0_rtx));
29495 call_fusage = NULL_RTX;
29496 use_reg (&call_fusage, r12);
29497 /* Say the call uses r0, even though it doesn't, to stop regrename
29498 from twiddling with the insns saving lr, trashing args for cfun.
29499 The insns restoring lr are similarly protected by making
29500 split_stack_return use r0. */
29501 use_reg (&call_fusage, r0);
29502 add_function_usage_to (insn, call_fusage);
29503 /* Indicate that this function can't jump to non-local gotos. */
29504 make_reg_eh_region_note_nothrow_nononlocal (insn);
29505 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29506 insn = emit_move_insn (lr, r0);
29507 add_reg_note (insn, REG_CFA_RESTORE, lr);
29508 RTX_FRAME_RELATED_P (insn) = 1;
29509 emit_insn (gen_split_stack_return ());
29511 emit_label (ok_label);
29512 LABEL_NUSES (ok_label) = 1;
29515 /* Return the internal arg pointer used for function incoming
29516 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29517 to copy it to a pseudo in order for it to be preserved over calls
29518 and suchlike. We'd really like to use a pseudo here for the
29519 internal arg pointer but data-flow analysis is not prepared to
29520 accept pseudos as live at the beginning of a function. */
29522 static rtx
29523 rs6000_internal_arg_pointer (void)
29525 if (flag_split_stack
29526 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29527 == NULL))
29530 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29532 rtx pat;
29534 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29535 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29537 /* Put the pseudo initialization right after the note at the
29538 beginning of the function. */
29539 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29540 gen_rtx_REG (Pmode, 12));
29541 push_topmost_sequence ();
29542 emit_insn_after (pat, get_insns ());
29543 pop_topmost_sequence ();
29545 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29546 FIRST_PARM_OFFSET (current_function_decl));
29548 return virtual_incoming_args_rtx;
29551 /* We may have to tell the dataflow pass that the split stack prologue
29552 is initializing a register. */
29554 static void
29555 rs6000_live_on_entry (bitmap regs)
29557 if (flag_split_stack)
29558 bitmap_set_bit (regs, 12);
29561 /* Emit -fsplit-stack dynamic stack allocation space check. */
29563 void
29564 rs6000_split_stack_space_check (rtx size, rtx label)
29566 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29567 rtx limit = gen_reg_rtx (Pmode);
29568 rtx requested = gen_reg_rtx (Pmode);
29569 rtx cmp = gen_reg_rtx (CCUNSmode);
29570 rtx jump;
29572 emit_insn (gen_load_split_stack_limit (limit));
29573 if (CONST_INT_P (size))
29574 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29575 else
29577 size = force_reg (Pmode, size);
29578 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29580 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29581 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29582 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29583 gen_rtx_LABEL_REF (VOIDmode, label),
29584 pc_rtx);
29585 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29586 JUMP_LABEL (jump) = label;
29589 /* A C compound statement that outputs the assembler code for a thunk
29590 function, used to implement C++ virtual function calls with
29591 multiple inheritance. The thunk acts as a wrapper around a virtual
29592 function, adjusting the implicit object parameter before handing
29593 control off to the real function.
29595 First, emit code to add the integer DELTA to the location that
29596 contains the incoming first argument. Assume that this argument
29597 contains a pointer, and is the one used to pass the `this' pointer
29598 in C++. This is the incoming argument *before* the function
29599 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29600 values of all other incoming arguments.
29602 After the addition, emit code to jump to FUNCTION, which is a
29603 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29604 not touch the return address. Hence returning from FUNCTION will
29605 return to whoever called the current `thunk'.
29607 The effect must be as if FUNCTION had been called directly with the
29608 adjusted first argument. This macro is responsible for emitting
29609 all of the code for a thunk function; output_function_prologue()
29610 and output_function_epilogue() are not invoked.
29612 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29613 been extracted from it.) It might possibly be useful on some
29614 targets, but probably not.
29616 If you do not define this macro, the target-independent code in the
29617 C++ frontend will generate a less efficient heavyweight thunk that
29618 calls FUNCTION instead of jumping to it. The generic approach does
29619 not support varargs. */
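/* A minimal example of where such a thunk arises, for orientation
   only:

	struct A { virtual void f (); int a; };
	struct B { virtual void g (); int b; };
	struct C : A, B { void g (); };

   Calling g through a B* that actually points into a C requires
   adjusting "this" by the offset of the B subobject (DELTA), and for
   virtual bases also loading an adjustment from the vtable
   (VCALL_OFFSET), before tail-calling the real C::g.  */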
29621 static void
29622 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29623 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29624 tree function)
29626 rtx this_rtx, funexp;
29627 rtx_insn *insn;
29629 reload_completed = 1;
29630 epilogue_completed = 1;
29632 /* Mark the end of the (empty) prologue. */
29633 emit_note (NOTE_INSN_PROLOGUE_END);
29635 /* Find the "this" pointer. If the function returns a structure,
29636 the structure return pointer is in r3 and "this" arrives in r4. */
29637 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29638 this_rtx = gen_rtx_REG (Pmode, 4);
29639 else
29640 this_rtx = gen_rtx_REG (Pmode, 3);
29642 /* Apply the constant offset, if required. */
29643 if (delta)
29644 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29646 /* Apply the offset from the vtable, if required. */
29647 if (vcall_offset)
29649 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29650 rtx tmp = gen_rtx_REG (Pmode, 12);
29652 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29653 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29655 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29656 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29658 else
29660 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29662 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29664 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29667 /* Generate a tail call to the target function. */
29668 if (!TREE_USED (function))
29670 assemble_external (function);
29671 TREE_USED (function) = 1;
29673 funexp = XEXP (DECL_RTL (function), 0);
29674 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29676 #if TARGET_MACHO
29677 if (MACHOPIC_INDIRECT)
29678 funexp = machopic_indirect_call_target (funexp);
29679 #endif
29681 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29682 generate sibcall RTL explicitly. */
29683 insn = emit_call_insn (
29684 gen_rtx_PARALLEL (VOIDmode,
29685 gen_rtvec (3,
29686 gen_rtx_CALL (VOIDmode,
29687 funexp, const0_rtx),
29688 gen_rtx_USE (VOIDmode, const0_rtx),
29689 simple_return_rtx)));
29690 SIBLING_CALL_P (insn) = 1;
29691 emit_barrier ();
29693 /* Run just enough of rest_of_compilation to get the insns emitted.
29694 There's not really enough bulk here to make other passes such as
29695 instruction scheduling worthwhile. Note that use_thunk calls
29696 assemble_start_function and assemble_end_function. */
29697 insn = get_insns ();
29698 shorten_branches (insn);
29699 final_start_function (insn, file, 1);
29700 final (insn, file, 1);
29701 final_end_function ();
29703 reload_completed = 0;
29704 epilogue_completed = 0;
29707 /* A quick summary of the various types of 'constant-pool tables'
29708 under PowerPC:
29710 Target       Flags                Name             One table per
29711 AIX          (none)               AIX TOC          object file
29712 AIX          -mfull-toc           AIX TOC          object file
29713 AIX          -mminimal-toc        AIX minimal TOC  translation unit
29714 SVR4/EABI    (none)               SVR4 SDATA       object file
29715 SVR4/EABI    -fpic                SVR4 pic         object file
29716 SVR4/EABI    -fPIC                SVR4 PIC         translation unit
29717 SVR4/EABI    -mrelocatable        EABI TOC         function
29718 SVR4/EABI    -maix                AIX TOC          object file
29719 SVR4/EABI    -maix -mminimal-toc
29720                                   AIX minimal TOC  translation unit
29722 Name             Reg.  Set by   entries  contains:
29723                                 made by  addrs?   fp?      sum?
29725 AIX TOC          2     crt0     as       Y        option   option
29726 AIX minimal TOC  30    prolog   gcc      Y        Y        option
29727 SVR4 SDATA       13    crt0     gcc      N        Y        N
29728 SVR4 pic         30    prolog   ld       Y        not yet  N
29729 SVR4 PIC         30    prolog   gcc      Y        option   option
29730 EABI TOC         30    prolog   gcc      Y        option   option
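/* As a rough example of what output_toc below produces: on AIX a TOC
   entry for a symbol "foo" (hypothetical) looks like

	LC..3:
		.tc foo[TC],foo

   while with TARGET_ELF or -mminimal-toc the entry is just the bare
   constant, e.g.

	.LC3:
		.quad foo

   (.long rather than .quad on 32-bit targets); the label numbering is
   arbitrary.  */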
29734 /* Hash functions for the hash table. */
29736 static unsigned
29737 rs6000_hash_constant (rtx k)
29739 enum rtx_code code = GET_CODE (k);
29740 machine_mode mode = GET_MODE (k);
29741 unsigned result = (code << 3) ^ mode;
29742 const char *format;
29743 int flen, fidx;
29745 format = GET_RTX_FORMAT (code);
29746 flen = strlen (format);
29747 fidx = 0;
29749 switch (code)
29751 case LABEL_REF:
29752 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29754 case CONST_WIDE_INT:
29756 int i;
29757 flen = CONST_WIDE_INT_NUNITS (k);
29758 for (i = 0; i < flen; i++)
29759 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29760 return result;
29763 case CONST_DOUBLE:
29764 if (mode != VOIDmode)
29765 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29766 flen = 2;
29767 break;
29769 case CODE_LABEL:
29770 fidx = 3;
29771 break;
29773 default:
29774 break;
29777 for (; fidx < flen; fidx++)
29778 switch (format[fidx])
29780 case 's':
29782 unsigned i, len;
29783 const char *str = XSTR (k, fidx);
29784 len = strlen (str);
29785 result = result * 613 + len;
29786 for (i = 0; i < len; i++)
29787 result = result * 613 + (unsigned) str[i];
29788 break;
29790 case 'u':
29791 case 'e':
29792 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29793 break;
29794 case 'i':
29795 case 'n':
29796 result = result * 613 + (unsigned) XINT (k, fidx);
29797 break;
29798 case 'w':
29799 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29800 result = result * 613 + (unsigned) XWINT (k, fidx);
29801 else
29803 size_t i;
29804 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29805 result = result * 613 + (unsigned) (XWINT (k, fidx)
29806 >> CHAR_BIT * i);
29808 break;
29809 case '0':
29810 break;
29811 default:
29812 gcc_unreachable ();
29815 return result;
29818 hashval_t
29819 toc_hasher::hash (toc_hash_struct *thc)
29821 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29824 /* Compare H1 and H2 for equivalence. */
29826 bool
29827 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29829 rtx r1 = h1->key;
29830 rtx r2 = h2->key;
29832 if (h1->key_mode != h2->key_mode)
29833 return 0;
29835 return rtx_equal_p (r1, r2);
29838 /* These are the names given by the C++ front end to vtables and
29839 vtable-like objects. Ideally, this logic should not be here;
29840 instead, there should be some programmatic way of inquiring
29841 whether or not an object is a vtable. */
29843 #define VTABLE_NAME_P(NAME) \
29844 (strncmp ("_vt.", (NAME), strlen ("_vt.")) == 0 \
29845 || strncmp ("_ZTV", (NAME), strlen ("_ZTV")) == 0 \
29846 || strncmp ("_ZTT", (NAME), strlen ("_ZTT")) == 0 \
29847 || strncmp ("_ZTI", (NAME), strlen ("_ZTI")) == 0 \
29848 || strncmp ("_ZTC", (NAME), strlen ("_ZTC")) == 0)
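/* For example, the Itanium C++ ABI mangled names "_ZTV1A" (vtable),
   "_ZTI1A" (typeinfo), "_ZTT1A" (VTT) and "_ZTC..." (construction
   vtable) for a class A all match, as do old-style "_vt." names.  */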
29850 #ifdef NO_DOLLAR_IN_LABEL
29851 /* Return a GGC-allocated character string translating dollar signs in
29852 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
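/* For example, "foo$bar$baz" becomes "foo_bar_baz"; a name whose only
   dollar sign is its first character is returned unchanged.  */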
29854 const char *
29855 rs6000_xcoff_strip_dollar (const char *name)
29857 char *strip, *p;
29858 const char *q;
29859 size_t len;
29861 q = (const char *) strchr (name, '$');
29863 if (q == 0 || q == name)
29864 return name;
29866 len = strlen (name);
29867 strip = XALLOCAVEC (char, len + 1);
29868 strcpy (strip, name);
29869 p = strip + (q - name);
29870 while (p)
29872 *p = '_';
29873 p = strchr (p + 1, '$');
29876 return ggc_alloc_string (strip, len);
29878 #endif
29880 void
29881 rs6000_output_symbol_ref (FILE *file, rtx x)
29883 const char *name = XSTR (x, 0);
29885 /* Currently C++ toc references to vtables can be emitted before it
29886 is decided whether the vtable is public or private. If this is
29887 the case, then the linker will eventually complain that there is
29888 a reference to an unknown section. Thus, for vtables only,
29889 we emit the TOC reference to reference the identifier and not the
29890 symbol. */
29891 if (VTABLE_NAME_P (name))
29893 RS6000_OUTPUT_BASENAME (file, name);
29895 else
29896 assemble_name (file, name);
29899 /* Output a TOC entry. We derive the entry name from what is being
29900 written. */
29902 void
29903 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29905 char buf[256];
29906 const char *name = buf;
29907 rtx base = x;
29908 HOST_WIDE_INT offset = 0;
29910 gcc_assert (!TARGET_NO_TOC);
29912 /* When the linker won't eliminate them, don't output duplicate
29913 TOC entries (this happens on AIX if there is any kind of TOC,
29914 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29915 CODE_LABELs. */
29916 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29918 struct toc_hash_struct *h;
29920 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29921 time because GGC is not initialized at that point. */
29922 if (toc_hash_table == NULL)
29923 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29925 h = ggc_alloc<toc_hash_struct> ();
29926 h->key = x;
29927 h->key_mode = mode;
29928 h->labelno = labelno;
29930 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29931 if (*found == NULL)
29932 *found = h;
29933 else /* This is indeed a duplicate.
29934 Set this label equal to that label. */
29936 fputs ("\t.set ", file);
29937 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29938 fprintf (file, "%d,", labelno);
29939 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29940 fprintf (file, "%d\n", ((*found)->labelno));
29942 #ifdef HAVE_AS_TLS
29943 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29944 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29945 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29947 fputs ("\t.set ", file);
29948 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29949 fprintf (file, "%d,", labelno);
29950 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29951 fprintf (file, "%d\n", ((*found)->labelno));
29953 #endif
29954 return;
29958 /* If we're going to put a double constant in the TOC, make sure it's
29959 aligned properly when strict alignment is on. */
29960 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29961 && STRICT_ALIGNMENT
29962 && GET_MODE_BITSIZE (mode) >= 64
29963 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
29964 ASM_OUTPUT_ALIGN (file, 3);
29967 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29969 /* Handle FP constants specially. Note that if we have a minimal
29970 TOC, things we put here aren't actually in the TOC, so we can allow
29971 FP constants. */
29972 if (GET_CODE (x) == CONST_DOUBLE &&
29973 (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29974 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29976 long k[4];
29978 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29979 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29980 else
29981 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29983 if (TARGET_64BIT)
29985 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29986 fputs (DOUBLE_INT_ASM_OP, file);
29987 else
29988 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29989 k[0] & 0xffffffff, k[1] & 0xffffffff,
29990 k[2] & 0xffffffff, k[3] & 0xffffffff);
29991 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29992 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29993 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29994 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29995 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29996 return;
29998 else
30000 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30001 fputs ("\t.long ", file);
30002 else
30003 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
30004 k[0] & 0xffffffff, k[1] & 0xffffffff,
30005 k[2] & 0xffffffff, k[3] & 0xffffffff);
30006 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
30007 k[0] & 0xffffffff, k[1] & 0xffffffff,
30008 k[2] & 0xffffffff, k[3] & 0xffffffff);
30009 return;
30012 else if (GET_CODE (x) == CONST_DOUBLE &&
30013 (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
30015 long k[2];
30017 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
30018 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
30019 else
30020 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
30022 if (TARGET_64BIT)
30024 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30025 fputs (DOUBLE_INT_ASM_OP, file);
30026 else
30027 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
30028 k[0] & 0xffffffff, k[1] & 0xffffffff);
30029 fprintf (file, "0x%lx%08lx\n",
30030 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
30031 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
30032 return;
30034 else
30036 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30037 fputs ("\t.long ", file);
30038 else
30039 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
30040 k[0] & 0xffffffff, k[1] & 0xffffffff);
30041 fprintf (file, "0x%lx,0x%lx\n",
30042 k[0] & 0xffffffff, k[1] & 0xffffffff);
30043 return;
30046 else if (GET_CODE (x) == CONST_DOUBLE &&
30047 (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
30049 long l;
30051 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
30052 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
30053 else
30054 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
30056 if (TARGET_64BIT)
30058 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30059 fputs (DOUBLE_INT_ASM_OP, file);
30060 else
30061 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
30062 if (WORDS_BIG_ENDIAN)
30063 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
30064 else
30065 fprintf (file, "0x%lx\n", l & 0xffffffff);
30066 return;
30068 else
30070 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30071 fputs ("\t.long ", file);
30072 else
30073 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
30074 fprintf (file, "0x%lx\n", l & 0xffffffff);
30075 return;
30078 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
30080 unsigned HOST_WIDE_INT low;
30081 HOST_WIDE_INT high;
30083 low = INTVAL (x) & 0xffffffff;
30084 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
30086 /* TOC entries are always Pmode-sized, so on big-endian targets
30087 smaller integer constants in the TOC need to be padded.
30088 (This is still a win over putting the constants in
30089 a separate constant pool, because then we'd have
30090 to have both a TOC entry _and_ the actual constant.)
30092 For a 32-bit target, CONST_INT values are loaded and shifted
30093 entirely within `low' and can be stored in one TOC entry. */
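/* E.g. on a 64-bit big-endian target, an SImode constant 0x12345678
   is emitted as the doubleword 0x1234567800000000, so that a 32-bit
   load from the start of the TOC slot sees the right value.  */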
30095 /* It would be easy to make this work, but it doesn't now. */
30096 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
30098 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
30100 low |= high << 32;
30101 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
30102 high = (HOST_WIDE_INT) low >> 32;
30103 low &= 0xffffffff;
30106 if (TARGET_64BIT)
30108 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30109 fputs (DOUBLE_INT_ASM_OP, file);
30110 else
30111 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30112 (long) high & 0xffffffff, (long) low & 0xffffffff);
30113 fprintf (file, "0x%lx%08lx\n",
30114 (long) high & 0xffffffff, (long) low & 0xffffffff);
30115 return;
30117 else
30119 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
30121 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30122 fputs ("\t.long ", file);
30123 else
30124 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30125 (long) high & 0xffffffff, (long) low & 0xffffffff);
30126 fprintf (file, "0x%lx,0x%lx\n",
30127 (long) high & 0xffffffff, (long) low & 0xffffffff);
30129 else
30131 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30132 fputs ("\t.long ", file);
30133 else
30134 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
30135 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
30137 return;
30141 if (GET_CODE (x) == CONST)
30143 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
30144 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
30146 base = XEXP (XEXP (x, 0), 0);
30147 offset = INTVAL (XEXP (XEXP (x, 0), 1));
30150 switch (GET_CODE (base))
30152 case SYMBOL_REF:
30153 name = XSTR (base, 0);
30154 break;
30156 case LABEL_REF:
30157 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
30158 CODE_LABEL_NUMBER (XEXP (base, 0)));
30159 break;
30161 case CODE_LABEL:
30162 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
30163 break;
30165 default:
30166 gcc_unreachable ();
30169 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30170 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
30171 else
30173 fputs ("\t.tc ", file);
30174 RS6000_OUTPUT_BASENAME (file, name);
30176 if (offset < 0)
30177 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
30178 else if (offset)
30179 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
30181 /* Mark large TOC symbols on AIX with [TE] so they are mapped
30182 after other TOC symbols, reducing overflow of small TOC access
30183 to [TC] symbols. */
30184 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
30185 ? "[TE]," : "[TC],", file);
30188 /* Currently C++ toc references to vtables can be emitted before it
30189 is decided whether the vtable is public or private. If this is
30190 the case, then the linker will eventually complain that there is
30191 a TOC reference to an unknown section. Thus, for vtables only,
30192 we emit the TOC reference to reference the symbol and not the
30193 section. */
30194 if (VTABLE_NAME_P (name))
30196 RS6000_OUTPUT_BASENAME (file, name);
30197 if (offset < 0)
30198 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
30199 else if (offset > 0)
30200 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
30202 else
30203 output_addr_const (file, x);
30205 #if HAVE_AS_TLS
30206 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
30208 switch (SYMBOL_REF_TLS_MODEL (base))
30210 case 0:
30211 break;
30212 case TLS_MODEL_LOCAL_EXEC:
30213 fputs ("@le", file);
30214 break;
30215 case TLS_MODEL_INITIAL_EXEC:
30216 fputs ("@ie", file);
30217 break;
30218 /* Use global-dynamic for local-dynamic. */
30219 case TLS_MODEL_GLOBAL_DYNAMIC:
30220 case TLS_MODEL_LOCAL_DYNAMIC:
30221 putc ('\n', file);
30222 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
30223 fputs ("\t.tc .", file);
30224 RS6000_OUTPUT_BASENAME (file, name);
30225 fputs ("[TC],", file);
30226 output_addr_const (file, x);
30227 fputs ("@m", file);
30228 break;
30229 default:
30230 gcc_unreachable ();
30233 #endif
30235 putc ('\n', file);
30238 /* Output an assembler pseudo-op to write an ASCII string of N characters
30239 starting at P to FILE.
30241 On the RS/6000, we have to do this using the .byte operation and
30242 write out special characters outside the quoted string.
30243 Also, the assembler is broken; very long strings are truncated,
30244 so we must artificially break them up early. */
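/* For example, output_ascii (file, "Hi\n", 3) emits roughly

	.byte "Hi"
	.byte 10

   quoting the printable characters and writing the newline as a
   decimal byte.  */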
30246 void
30247 output_ascii (FILE *file, const char *p, int n)
30249 char c;
30250 int i, count_string;
30251 const char *for_string = "\t.byte \"";
30252 const char *for_decimal = "\t.byte ";
30253 const char *to_close = NULL;
30255 count_string = 0;
30256 for (i = 0; i < n; i++)
30258 c = *p++;
30259 if (c >= ' ' && c < 0177)
30261 if (for_string)
30262 fputs (for_string, file);
30263 putc (c, file);
30265 /* Write two quotes to get one. */
30266 if (c == '"')
30268 putc (c, file);
30269 ++count_string;
30272 for_string = NULL;
30273 for_decimal = "\"\n\t.byte ";
30274 to_close = "\"\n";
30275 ++count_string;
30277 if (count_string >= 512)
30279 fputs (to_close, file);
30281 for_string = "\t.byte \"";
30282 for_decimal = "\t.byte ";
30283 to_close = NULL;
30284 count_string = 0;
30287 else
30289 if (for_decimal)
30290 fputs (for_decimal, file);
30291 fprintf (file, "%d", c);
30293 for_string = "\n\t.byte \"";
30294 for_decimal = ", ";
30295 to_close = "\n";
30296 count_string = 0;
30300 /* Now close the string if we have written one. Then end the line. */
30301 if (to_close)
30302 fputs (to_close, file);
30305 /* Generate a unique section name for FILENAME for a section type
30306 represented by SECTION_DESC. Output goes into BUF.
30308 SECTION_DESC can be any string, as long as it is different for each
30309 possible section type.
30311 We name the section in the same manner as xlc. The name begins with an
30312 underscore followed by the filename (after stripping any leading directory
30313 names) with the last period replaced by the string SECTION_DESC. If
30314 FILENAME does not contain a period, SECTION_DESC is appended to the end of
30315 the name. */
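/* For example, FILENAME "dir/my-file.c" with SECTION_DESC ".ro_" (a
   made-up descriptor) yields "_myfile.ro_": the directory is stripped,
   non-alphanumeric characters are dropped, and the last period is
   replaced by the descriptor.  */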
30317 void
30318 rs6000_gen_section_name (char **buf, const char *filename,
30319 const char *section_desc)
30321 const char *q, *after_last_slash, *last_period = 0;
30322 char *p;
30323 int len;
30325 after_last_slash = filename;
30326 for (q = filename; *q; q++)
30328 if (*q == '/')
30329 after_last_slash = q + 1;
30330 else if (*q == '.')
30331 last_period = q;
30334 len = strlen (after_last_slash) + strlen (section_desc) + 2;
30335 *buf = (char *) xmalloc (len);
30337 p = *buf;
30338 *p++ = '_';
30340 for (q = after_last_slash; *q; q++)
30342 if (q == last_period)
30344 strcpy (p, section_desc);
30345 p += strlen (section_desc);
30346 break;
30349 else if (ISALNUM (*q))
30350 *p++ = *q;
30353 if (last_period == 0)
30354 strcpy (p, section_desc);
30355 else
30356 *p = '\0';
30359 /* Emit profile function. */
30361 void
30362 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30364 /* Non-standard profiling for kernels, which just saves LR then calls
30365 _mcount without worrying about arg saves. The idea is to change
30366 the function prologue as little as possible as it isn't easy to
30367 account for arg save/restore code added just for _mcount. */
30368 if (TARGET_PROFILE_KERNEL)
30369 return;
30371 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30373 #ifndef NO_PROFILE_COUNTERS
30374 # define NO_PROFILE_COUNTERS 0
30375 #endif
30376 if (NO_PROFILE_COUNTERS)
30377 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30378 LCT_NORMAL, VOIDmode);
30379 else
30381 char buf[30];
30382 const char *label_name;
30383 rtx fun;
30385 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30386 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30387 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30389 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30390 LCT_NORMAL, VOIDmode, fun, Pmode);
30393 else if (DEFAULT_ABI == ABI_DARWIN)
30395 const char *mcount_name = RS6000_MCOUNT;
30396 int caller_addr_regno = LR_REGNO;
30398 /* Be conservative and always set this, at least for now. */
30399 crtl->uses_pic_offset_table = 1;
30401 #if TARGET_MACHO
30402 /* For PIC code, set up a stub and collect the caller's address
30403 from r0, which is where the prologue puts it. */
30404 if (MACHOPIC_INDIRECT
30405 && crtl->uses_pic_offset_table)
30406 caller_addr_regno = 0;
30407 #endif
30408 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30409 LCT_NORMAL, VOIDmode,
30410 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30414 /* Write function profiler code. */
30416 void
30417 output_function_profiler (FILE *file, int labelno)
30419 char buf[100];
30421 switch (DEFAULT_ABI)
30423 default:
30424 gcc_unreachable ();
30426 case ABI_V4:
30427 if (!TARGET_32BIT)
30429 warning (0, "no profiling of 64-bit code for this ABI");
30430 return;
30432 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30433 fprintf (file, "\tmflr %s\n", reg_names[0]);
30434 if (NO_PROFILE_COUNTERS)
30436 asm_fprintf (file, "\tstw %s,4(%s)\n",
30437 reg_names[0], reg_names[1]);
30439 else if (TARGET_SECURE_PLT && flag_pic)
30441 if (TARGET_LINK_STACK)
30443 char name[32];
30444 get_ppc476_thunk_name (name);
30445 asm_fprintf (file, "\tbl %s\n", name);
30447 else
30448 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30449 asm_fprintf (file, "\tstw %s,4(%s)\n",
30450 reg_names[0], reg_names[1]);
30451 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30452 asm_fprintf (file, "\taddis %s,%s,",
30453 reg_names[12], reg_names[12]);
30454 assemble_name (file, buf);
30455 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30456 assemble_name (file, buf);
30457 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30459 else if (flag_pic == 1)
30461 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30462 asm_fprintf (file, "\tstw %s,4(%s)\n",
30463 reg_names[0], reg_names[1]);
30464 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30465 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30466 assemble_name (file, buf);
30467 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30469 else if (flag_pic > 1)
30471 asm_fprintf (file, "\tstw %s,4(%s)\n",
30472 reg_names[0], reg_names[1]);
30473 /* Now, we need to get the address of the label. */
30474 if (TARGET_LINK_STACK)
30476 char name[32];
30477 get_ppc476_thunk_name (name);
30478 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30479 assemble_name (file, buf);
30480 fputs ("-.\n1:", file);
30481 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30482 asm_fprintf (file, "\taddi %s,%s,4\n",
30483 reg_names[11], reg_names[11]);
30485 else
30487 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30488 assemble_name (file, buf);
30489 fputs ("-.\n1:", file);
30490 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30492 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30493 reg_names[0], reg_names[11]);
30494 asm_fprintf (file, "\tadd %s,%s,%s\n",
30495 reg_names[0], reg_names[0], reg_names[11]);
30497 else
30499 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30500 assemble_name (file, buf);
30501 fputs ("@ha\n", file);
30502 asm_fprintf (file, "\tstw %s,4(%s)\n",
30503 reg_names[0], reg_names[1]);
30504 asm_fprintf (file, "\tla %s,", reg_names[0]);
30505 assemble_name (file, buf);
30506 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30509 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30510 fprintf (file, "\tbl %s%s\n",
30511 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30512 break;
30514 case ABI_AIX:
30515 case ABI_ELFv2:
30516 case ABI_DARWIN:
30517 /* Don't do anything, done in output_profile_hook (). */
30518 break;
30524 /* The following variable holds the last issued insn. */
30526 static rtx_insn *last_scheduled_insn;
30528 /* The following variable helps to balance issuing of load and
30529 store instructions. */
30531 static int load_store_pendulum;
30533 /* The following variable helps pair divide insns during scheduling. */
30534 static int divide_cnt;
30535 /* The following variable helps pair and alternate vector and vector load
30536 insns during scheduling. */
30537 static int vec_pairing;
30540 /* Power4 load update and store update instructions are cracked into a
30541 load or store and an integer insn which are executed in the same cycle.
30542 Branches have their own dispatch slot which does not count against the
30543 GCC issue rate, but it changes the program flow so there are no other
30544 instructions to issue in this cycle. */
30546 static int
30547 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30549 last_scheduled_insn = insn;
30550 if (GET_CODE (PATTERN (insn)) == USE
30551 || GET_CODE (PATTERN (insn)) == CLOBBER)
30553 cached_can_issue_more = more;
30554 return cached_can_issue_more;
30557 if (insn_terminates_group_p (insn, current_group))
30559 cached_can_issue_more = 0;
30560 return cached_can_issue_more;
30563 /* If the insn has no reservation but we still reach here. */
30564 if (recog_memoized (insn) < 0)
30565 return more;
30567 if (rs6000_sched_groups)
30569 if (is_microcoded_insn (insn))
30570 cached_can_issue_more = 0;
30571 else if (is_cracked_insn (insn))
30572 cached_can_issue_more = more > 2 ? more - 2 : 0;
30573 else
30574 cached_can_issue_more = more - 1;
30576 return cached_can_issue_more;
30579 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
30580 return 0;
30582 cached_can_issue_more = more - 1;
30583 return cached_can_issue_more;
30586 static int
30587 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30589 int r = rs6000_variable_issue_1 (insn, more);
30590 if (verbose)
30591 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30592 return r;
30595 /* Adjust the cost of a scheduling dependency. Return the new cost of
30596 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30598 static int
30599 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30600 unsigned int)
30602 enum attr_type attr_type;
30604 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30605 return cost;
30607 switch (dep_type)
30609 case REG_DEP_TRUE:
30611 /* Data dependency; DEP_INSN writes a register that INSN reads
30612 some cycles later. */
30614 /* Separate a load from a narrower, dependent store. */
30615 if ((rs6000_sched_groups || rs6000_cpu_attr == CPU_POWER9)
30616 && GET_CODE (PATTERN (insn)) == SET
30617 && GET_CODE (PATTERN (dep_insn)) == SET
30618 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30619 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30620 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30621 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30622 return cost + 14;
30624 attr_type = get_attr_type (insn);
30626 switch (attr_type)
30628 case TYPE_JMPREG:
30629 /* Tell the first scheduling pass about the latency between
30630 a mtctr and bctr (and mtlr and br/blr). The first
30631 scheduling pass will not know about this latency since
30632 the mtctr instruction, which has the latency associated
30633 to it, will be generated by reload. */
30634 return 4;
30635 case TYPE_BRANCH:
30636 /* Leave some extra cycles between a compare and its
30637 dependent branch, to inhibit expensive mispredicts. */
30638 if ((rs6000_cpu_attr == CPU_PPC603
30639 || rs6000_cpu_attr == CPU_PPC604
30640 || rs6000_cpu_attr == CPU_PPC604E
30641 || rs6000_cpu_attr == CPU_PPC620
30642 || rs6000_cpu_attr == CPU_PPC630
30643 || rs6000_cpu_attr == CPU_PPC750
30644 || rs6000_cpu_attr == CPU_PPC7400
30645 || rs6000_cpu_attr == CPU_PPC7450
30646 || rs6000_cpu_attr == CPU_PPCE5500
30647 || rs6000_cpu_attr == CPU_PPCE6500
30648 || rs6000_cpu_attr == CPU_POWER4
30649 || rs6000_cpu_attr == CPU_POWER5
30650 || rs6000_cpu_attr == CPU_POWER7
30651 || rs6000_cpu_attr == CPU_POWER8
30652 || rs6000_cpu_attr == CPU_POWER9
30653 || rs6000_cpu_attr == CPU_CELL)
30654 && recog_memoized (dep_insn)
30655 && (INSN_CODE (dep_insn) >= 0))
30657 switch (get_attr_type (dep_insn))
30659 case TYPE_CMP:
30660 case TYPE_FPCOMPARE:
30661 case TYPE_CR_LOGICAL:
30662 case TYPE_DELAYED_CR:
30663 return cost + 2;
30664 case TYPE_EXTS:
30665 case TYPE_MUL:
30666 if (get_attr_dot (dep_insn) == DOT_YES)
30667 return cost + 2;
30668 else
30669 break;
30670 case TYPE_SHIFT:
30671 if (get_attr_dot (dep_insn) == DOT_YES
30672 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30673 return cost + 2;
30674 else
30675 break;
30676 default:
30677 break;
30679 break;
30681 case TYPE_STORE:
30682 case TYPE_FPSTORE:
30683 if ((rs6000_cpu == PROCESSOR_POWER6)
30684 && recog_memoized (dep_insn)
30685 && (INSN_CODE (dep_insn) >= 0))
30688 if (GET_CODE (PATTERN (insn)) != SET)
30689 /* If this happens, we have to extend this to schedule
30690 optimally. Return default for now. */
30691 return cost;
30693 /* Adjust the cost for the case where the value written
30694 by a fixed point operation is used as the address
30695 gen value on a store. */
30696 switch (get_attr_type (dep_insn))
30698 case TYPE_LOAD:
30699 case TYPE_CNTLZ:
30701 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30702 return get_attr_sign_extend (dep_insn)
30703 == SIGN_EXTEND_YES ? 6 : 4;
30704 break;
30706 case TYPE_SHIFT:
30708 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30709 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30710 6 : 3;
30711 break;
30713 case TYPE_INTEGER:
30714 case TYPE_ADD:
30715 case TYPE_LOGICAL:
30716 case TYPE_EXTS:
30717 case TYPE_INSERT:
30719 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30720 return 3;
30721 break;
30723 case TYPE_STORE:
30724 case TYPE_FPLOAD:
30725 case TYPE_FPSTORE:
30727 if (get_attr_update (dep_insn) == UPDATE_YES
30728 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30729 return 3;
30730 break;
30732 case TYPE_MUL:
30734 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30735 return 17;
30736 break;
30738 case TYPE_DIV:
30740 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30741 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30742 break;
30744 default:
30745 break;
30748 break;
30750 case TYPE_LOAD:
30751 if ((rs6000_cpu == PROCESSOR_POWER6)
30752 && recog_memoized (dep_insn)
30753 && (INSN_CODE (dep_insn) >= 0))
30756 /* Adjust the cost for the case where the value written
30757 by a fixed point instruction is used within the address
30758 gen portion of a subsequent load(u)(x) */
30759 switch (get_attr_type (dep_insn))
30761 case TYPE_LOAD:
30762 case TYPE_CNTLZ:
30764 if (set_to_load_agen (dep_insn, insn))
30765 return get_attr_sign_extend (dep_insn)
30766 == SIGN_EXTEND_YES ? 6 : 4;
30767 break;
30769 case TYPE_SHIFT:
30771 if (set_to_load_agen (dep_insn, insn))
30772 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30773 6 : 3;
30774 break;
30776 case TYPE_INTEGER:
30777 case TYPE_ADD:
30778 case TYPE_LOGICAL:
30779 case TYPE_EXTS:
30780 case TYPE_INSERT:
30782 if (set_to_load_agen (dep_insn, insn))
30783 return 3;
30784 break;
30786 case TYPE_STORE:
30787 case TYPE_FPLOAD:
30788 case TYPE_FPSTORE:
30790 if (get_attr_update (dep_insn) == UPDATE_YES
30791 && set_to_load_agen (dep_insn, insn))
30792 return 3;
30793 break;
30795 case TYPE_MUL:
30797 if (set_to_load_agen (dep_insn, insn))
30798 return 17;
30799 break;
30801 case TYPE_DIV:
30803 if (set_to_load_agen (dep_insn, insn))
30804 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30805 break;
30807 default:
30808 break;
30811 break;
30813 case TYPE_FPLOAD:
30814 if ((rs6000_cpu == PROCESSOR_POWER6)
30815 && get_attr_update (insn) == UPDATE_NO
30816 && recog_memoized (dep_insn)
30817 && (INSN_CODE (dep_insn) >= 0)
30818 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30819 return 2;
30821 default:
30822 break;
30825 /* Fall out to return default cost. */
30827 break;
30829 case REG_DEP_OUTPUT:
30830 /* Output dependency; DEP_INSN writes a register that INSN writes some
30831 cycles later. */
30832 if ((rs6000_cpu == PROCESSOR_POWER6)
30833 && recog_memoized (dep_insn)
30834 && (INSN_CODE (dep_insn) >= 0))
30836 attr_type = get_attr_type (insn);
30838 switch (attr_type)
30840 case TYPE_FP:
30841 case TYPE_FPSIMPLE:
30842 if (get_attr_type (dep_insn) == TYPE_FP
30843 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30844 return 1;
30845 break;
30846 case TYPE_FPLOAD:
30847 if (get_attr_update (insn) == UPDATE_NO
30848 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30849 return 2;
30850 break;
30851 default:
30852 break;
30855 /* Fall through, no cost for output dependency. */
30856 /* FALLTHRU */
30858 case REG_DEP_ANTI:
30859 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30860 cycles later. */
30861 return 0;
30863 default:
30864 gcc_unreachable ();
30867 return cost;
30870 /* Debug version of rs6000_adjust_cost. */
30872 static int
30873 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30874 int cost, unsigned int dw)
30876 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30878 if (ret != cost)
30880 const char *dep;
30882 switch (dep_type)
30884 default: dep = "unknown dependency"; break;
30885 case REG_DEP_TRUE: dep = "data dependency"; break;
30886 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30887 case REG_DEP_ANTI: dep = "anti dependency"; break;
30890 fprintf (stderr,
30891 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30892 "%s, insn:\n", ret, cost, dep);
30894 debug_rtx (insn);
30897 return ret;
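/* Illustrative sketch (not part of GCC): the store-hit-load adjustment made
   for REG_DEP_TRUE above, reduced to plain integers.  The function name and
   byte sizes are invented for the example; the "+14" separation mirrors the
   heuristic above, which keeps a load well away from a narrower store it
   depends on.  */
#if 0
static int
true_dep_cost_after_store (int load_bytes, int store_bytes, int cost)
{
  /* Heuristic from above: separate a load from a narrower, dependent
     store by inflating the dependence cost.  */
  if (load_bytes > store_bytes)
    return cost + 14;
  return cost;
}
#endif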
30900 /* Return true if INSN is microcoded, false otherwise. */
30903 static bool
30904 is_microcoded_insn (rtx_insn *insn)
30906 if (!insn || !NONDEBUG_INSN_P (insn)
30907 || GET_CODE (PATTERN (insn)) == USE
30908 || GET_CODE (PATTERN (insn)) == CLOBBER)
30909 return false;
30911 if (rs6000_cpu_attr == CPU_CELL)
30912 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30914 if (rs6000_sched_groups
30915 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30917 enum attr_type type = get_attr_type (insn);
30918 if ((type == TYPE_LOAD
30919 && get_attr_update (insn) == UPDATE_YES
30920 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30921 || ((type == TYPE_LOAD || type == TYPE_STORE)
30922 && get_attr_update (insn) == UPDATE_YES
30923 && get_attr_indexed (insn) == INDEXED_YES)
30924 || type == TYPE_MFCR)
30925 return true;
30928 return false;
30931 /* The function returns true if INSN is cracked into 2 instructions
30932 by the processor (and therefore occupies 2 issue slots). */
30934 static bool
30935 is_cracked_insn (rtx_insn *insn)
30937 if (!insn || !NONDEBUG_INSN_P (insn)
30938 || GET_CODE (PATTERN (insn)) == USE
30939 || GET_CODE (PATTERN (insn)) == CLOBBER)
30940 return false;
30942 if (rs6000_sched_groups
30943 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30945 enum attr_type type = get_attr_type (insn);
30946 if ((type == TYPE_LOAD
30947 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30948 && get_attr_update (insn) == UPDATE_NO)
30949 || (type == TYPE_LOAD
30950 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30951 && get_attr_update (insn) == UPDATE_YES
30952 && get_attr_indexed (insn) == INDEXED_NO)
30953 || (type == TYPE_STORE
30954 && get_attr_update (insn) == UPDATE_YES
30955 && get_attr_indexed (insn) == INDEXED_NO)
30956 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30957 && get_attr_update (insn) == UPDATE_YES)
30958 || type == TYPE_DELAYED_CR
30959 || (type == TYPE_EXTS
30960 && get_attr_dot (insn) == DOT_YES)
30961 || (type == TYPE_SHIFT
30962 && get_attr_dot (insn) == DOT_YES
30963 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30964 || (type == TYPE_MUL
30965 && get_attr_dot (insn) == DOT_YES)
30966 || type == TYPE_DIV
30967 || (type == TYPE_INSERT
30968 && get_attr_size (insn) == SIZE_32))
30969 return true;
30972 return false;
30975 /* The function returns true if INSN can be issued only from
30976 the branch slot. */
30978 static bool
30979 is_branch_slot_insn (rtx_insn *insn)
30981 if (!insn || !NONDEBUG_INSN_P (insn)
30982 || GET_CODE (PATTERN (insn)) == USE
30983 || GET_CODE (PATTERN (insn)) == CLOBBER)
30984 return false;
30986 if (rs6000_sched_groups)
30988 enum attr_type type = get_attr_type (insn);
30989 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30990 return true;
30991 return false;
30994 return false;
30997 /* Return true if OUT_INSN sets a value that is used in the
30998 address generation computation of IN_INSN. */
30999 static bool
31000 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
31002 rtx out_set, in_set;
31004 /* For performance reasons, only handle the simple case where
31005 both loads are a single_set. */
31006 out_set = single_set (out_insn);
31007 if (out_set)
31009 in_set = single_set (in_insn);
31010 if (in_set)
31011 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
31014 return false;
31017 /* Try to determine the base, offset and size parts of the given MEM.
31018 Return true if successful, false if any of the values couldn't
31019 be determined.
31021 This function only looks for REG or REG+CONST address forms.
31022 A REG+REG address form will return false. */
31024 static bool
31025 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
31026 HOST_WIDE_INT *size)
31028 rtx addr_rtx;
31029 if (MEM_SIZE_KNOWN_P (mem))
31030 *size = MEM_SIZE (mem);
31031 else
31032 return false;
31034 addr_rtx = (XEXP (mem, 0));
31035 if (GET_CODE (addr_rtx) == PRE_MODIFY)
31036 addr_rtx = XEXP (addr_rtx, 1);
31038 *offset = 0;
31039 while (GET_CODE (addr_rtx) == PLUS
31040 && CONST_INT_P (XEXP (addr_rtx, 1)))
31042 *offset += INTVAL (XEXP (addr_rtx, 1));
31043 addr_rtx = XEXP (addr_rtx, 0);
31045 if (!REG_P (addr_rtx))
31046 return false;
31048 *base = addr_rtx;
31049 return true;
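/* Illustrative sketch (not part of GCC): the address splitting done by
   get_memref_parts, over an invented three-case address representation
   instead of real RTL.  REG and REG+CONST succeed; REG+REG fails because no
   constant offset can be computed.  */
#if 0
#include <stdbool.h>

enum addr_kind { ADDR_REG, ADDR_REG_PLUS_CONST, ADDR_REG_PLUS_REG };
struct addr { enum addr_kind kind; int base_reg; long disp; };

static bool
split_base_offset (const struct addr *a, int *base, long *offset)
{
  if (a->kind == ADDR_REG_PLUS_REG)
    return false;		/* no compile-time offset available */
  *base = a->base_reg;
  *offset = a->kind == ADDR_REG_PLUS_CONST ? a->disp : 0;
  return true;
}
#endif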
31052 /* Return true if the target storage location of MEM1 is adjacent
31053 to the target storage location of MEM2. */
31056 static bool
31057 adjacent_mem_locations (rtx mem1, rtx mem2)
31059 rtx reg1, reg2;
31060 HOST_WIDE_INT off1, size1, off2, size2;
31062 if (get_memref_parts (mem1, &reg1, &off1, &size1)
31063 && get_memref_parts (mem2, &reg2, &off2, &size2))
31064 return ((REGNO (reg1) == REGNO (reg2))
31065 && ((off1 + size1 == off2)
31066 || (off2 + size2 == off1)));
31068 return false;
31071 /* This function returns true if it can be determined that the two MEM
31072 locations overlap by at least 1 byte based on base reg/offset/size. */
31074 static bool
31075 mem_locations_overlap (rtx mem1, rtx mem2)
31077 rtx reg1, reg2;
31078 HOST_WIDE_INT off1, size1, off2, size2;
31080 if (get_memref_parts (mem1, &reg1, &off1, &size1)
31081 && get_memref_parts (mem2, &reg2, &off2, &size2))
31082 return ((REGNO (reg1) == REGNO (reg2))
31083 && (((off1 <= off2) && (off1 + size1 > off2))
31084 || ((off2 <= off1) && (off2 + size2 > off1))));
31086 return false;
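/* Illustrative sketch (not part of GCC): once get_memref_parts has reduced
   an access to a (base register, offset, size) triple, the two predicates
   above are plain interval arithmetic.  The struct below is invented for the
   example.  */
#if 0
#include <stdbool.h>

struct memref { int base_reg; long off, size; };

static bool
mem_adjacent (struct memref a, struct memref b)
{
  return a.base_reg == b.base_reg
	 && (a.off + a.size == b.off || b.off + b.size == a.off);
}

static bool
mem_overlap (struct memref a, struct memref b)
{
  return a.base_reg == b.base_reg
	 && ((a.off <= b.off && a.off + a.size > b.off)
	     || (b.off <= a.off && b.off + b.size > a.off));
}

/* E.g. {r, 0, 4} and {r, 4, 4} are adjacent; {r, 0, 8} and {r, 4, 4}
   overlap.  */
#endif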
31089 /* Update the integer scheduling priority INSN_PRIORITY (INSN).
31090 Increase the priority to execute INSN earlier, reduce the priority
31091 to execute INSN later. */
31095 static int
31096 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
31098 rtx load_mem, str_mem;
31099 /* On machines (like the 750) which have asymmetric integer units,
31100 where one integer unit can do multiply and divides and the other
31101 can't, reduce the priority of multiply/divide so other integer
31102 operations are scheduled ahead of them. */
31104 #if 0
31105 if (! INSN_P (insn))
31106 return priority;
31108 if (GET_CODE (PATTERN (insn)) == USE)
31109 return priority;
31111 switch (rs6000_cpu_attr) {
31112 case CPU_PPC750:
31113 switch (get_attr_type (insn))
31115 default:
31116 break;
31118 case TYPE_MUL:
31119 case TYPE_DIV:
31120 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
31121 priority, priority);
31122 if (priority >= 0 && priority < 0x01000000)
31123 priority >>= 3;
31124 break;
31127 #endif
31129 if (insn_must_be_first_in_group (insn)
31130 && reload_completed
31131 && current_sched_info->sched_max_insns_priority
31132 && rs6000_sched_restricted_insns_priority)
31135 /* Prioritize insns that can be dispatched only in the first
31136 dispatch slot. */
31137 if (rs6000_sched_restricted_insns_priority == 1)
31138 /* Attach highest priority to insn. This means that in
31139 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
31140 precede 'priority' (critical path) considerations. */
31141 return current_sched_info->sched_max_insns_priority;
31142 else if (rs6000_sched_restricted_insns_priority == 2)
31143 /* Increase priority of insn by a minimal amount. This means that in
31144 haifa-sched.c:ready_sort(), only 'priority' (critical path)
31145 considerations precede dispatch-slot restriction considerations. */
31146 return (priority + 1);
31149 if (rs6000_cpu == PROCESSOR_POWER6
31150 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
31151 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
31152 /* Attach highest priority to insn if the scheduler has just issued two
31153 stores and this instruction is a load, or two loads and this instruction
31154 is a store. Power6 wants loads and stores scheduled alternately
31155 when possible. */
31156 return current_sched_info->sched_max_insns_priority;
31158 return priority;
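/* Illustrative sketch (not part of GCC): the two priority schemes selected
   by rs6000_sched_restricted_insns_priority above.  Scheme 1 makes the
   dispatch-slot restriction dominate the critical-path priority; scheme 2
   merely breaks ties in its favor.  The function name is invented.  */
#if 0
static int
restricted_insn_priority (int scheme, int priority, int max_priority)
{
  if (scheme == 1)
    return max_priority;	/* restriction outranks critical path */
  if (scheme == 2)
    return priority + 1;	/* critical path still dominates */
  return priority;
}
#endif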
31161 /* Return true if the instruction is nonpipelined on the Cell. */
31162 static bool
31163 is_nonpipeline_insn (rtx_insn *insn)
31165 enum attr_type type;
31166 if (!insn || !NONDEBUG_INSN_P (insn)
31167 || GET_CODE (PATTERN (insn)) == USE
31168 || GET_CODE (PATTERN (insn)) == CLOBBER)
31169 return false;
31171 type = get_attr_type (insn);
31172 if (type == TYPE_MUL
31173 || type == TYPE_DIV
31174 || type == TYPE_SDIV
31175 || type == TYPE_DDIV
31176 || type == TYPE_SSQRT
31177 || type == TYPE_DSQRT
31178 || type == TYPE_MFCR
31179 || type == TYPE_MFCRF
31180 || type == TYPE_MFJMPR)
31182 return true;
31184 return false;
31188 /* Return how many instructions the machine can issue per cycle. */
31190 static int
31191 rs6000_issue_rate (void)
31193 /* Unless scheduling for register pressure, use an issue rate of 1 for
31194 the first scheduling pass to decrease degradation. */
31195 if (!reload_completed && !flag_sched_pressure)
31196 return 1;
31198 switch (rs6000_cpu_attr) {
31199 case CPU_RS64A:
31200 case CPU_PPC601: /* ? */
31201 case CPU_PPC7450:
31202 return 3;
31203 case CPU_PPC440:
31204 case CPU_PPC603:
31205 case CPU_PPC750:
31206 case CPU_PPC7400:
31207 case CPU_PPC8540:
31208 case CPU_PPC8548:
31209 case CPU_CELL:
31210 case CPU_PPCE300C2:
31211 case CPU_PPCE300C3:
31212 case CPU_PPCE500MC:
31213 case CPU_PPCE500MC64:
31214 case CPU_PPCE5500:
31215 case CPU_PPCE6500:
31216 case CPU_TITAN:
31217 return 2;
31218 case CPU_PPC476:
31219 case CPU_PPC604:
31220 case CPU_PPC604E:
31221 case CPU_PPC620:
31222 case CPU_PPC630:
31223 return 4;
31224 case CPU_POWER4:
31225 case CPU_POWER5:
31226 case CPU_POWER6:
31227 case CPU_POWER7:
31228 return 5;
31229 case CPU_POWER8:
31230 return 7;
31231 case CPU_POWER9:
31232 return 6;
31233 default:
31234 return 1;
31238 /* Return how many instructions to look ahead for better insn
31239 scheduling. */
31241 static int
31242 rs6000_use_sched_lookahead (void)
31244 switch (rs6000_cpu_attr)
31246 case CPU_PPC8540:
31247 case CPU_PPC8548:
31248 return 4;
31250 case CPU_CELL:
31251 return (reload_completed ? 8 : 0);
31253 default:
31254 return 0;
31258 /* We are choosing insn from the ready queue. Return zero if INSN can be
31259 chosen. */
31260 static int
31261 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
31263 if (ready_index == 0)
31264 return 0;
31266 if (rs6000_cpu_attr != CPU_CELL)
31267 return 0;
31269 gcc_assert (insn != NULL_RTX && INSN_P (insn));
31271 if (!reload_completed
31272 || is_nonpipeline_insn (insn)
31273 || is_microcoded_insn (insn))
31274 return 1;
31276 return 0;
31279 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
31280 and return true. */
31282 static bool
31283 find_mem_ref (rtx pat, rtx *mem_ref)
31285 const char * fmt;
31286 int i, j;
31288 /* stack_tie does not produce any real memory traffic. */
31289 if (tie_operand (pat, VOIDmode))
31290 return false;
31292 if (GET_CODE (pat) == MEM)
31294 *mem_ref = pat;
31295 return true;
31298 /* Recursively process the pattern. */
31299 fmt = GET_RTX_FORMAT (GET_CODE (pat));
31301 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
31303 if (fmt[i] == 'e')
31305 if (find_mem_ref (XEXP (pat, i), mem_ref))
31306 return true;
31308 else if (fmt[i] == 'E')
31309 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
31311 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
31312 return true;
31316 return false;
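/* Illustrative sketch (not part of GCC): find_mem_ref's depth-first walk,
   over an invented fixed-arity tree instead of the rtx format strings.  */
#if 0
#include <stdbool.h>
#include <stddef.h>

struct node { bool is_mem; int nkids; struct node *kid[4]; };

static bool
find_mem (struct node *n, struct node **mem_ref)
{
  if (n == NULL)
    return false;
  if (n->is_mem)
    {
      *mem_ref = n;
      return true;
    }
  /* Scan operands last-to-first, as the walk above does.  */
  for (int i = n->nkids - 1; i >= 0; i--)
    if (find_mem (n->kid[i], mem_ref))
      return true;
  return false;
}
#endif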
31319 /* Determine if PAT is a PATTERN of a load insn. */
31321 static bool
31322 is_load_insn1 (rtx pat, rtx *load_mem)
31324 if (!pat)
31325 return false;
31327 if (GET_CODE (pat) == SET)
31328 return find_mem_ref (SET_SRC (pat), load_mem);
31330 if (GET_CODE (pat) == PARALLEL)
31332 int i;
31334 for (i = 0; i < XVECLEN (pat, 0); i++)
31335 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
31336 return true;
31339 return false;
31342 /* Determine if INSN loads from memory. */
31344 static bool
31345 is_load_insn (rtx insn, rtx *load_mem)
31347 if (!insn || !INSN_P (insn))
31348 return false;
31350 if (CALL_P (insn))
31351 return false;
31353 return is_load_insn1 (PATTERN (insn), load_mem);
31356 /* Determine if PAT is a PATTERN of a store insn. */
31358 static bool
31359 is_store_insn1 (rtx pat, rtx *str_mem)
31361 if (!pat)
31362 return false;
31364 if (GET_CODE (pat) == SET)
31365 return find_mem_ref (SET_DEST (pat), str_mem);
31367 if (GET_CODE (pat) == PARALLEL)
31369 int i;
31371 for (i = 0; i < XVECLEN (pat, 0); i++)
31372 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31373 return true;
31376 return false;
31379 /* Determine if INSN stores to memory. */
31381 static bool
31382 is_store_insn (rtx insn, rtx *str_mem)
31384 if (!insn || !INSN_P (insn))
31385 return false;
31387 return is_store_insn1 (PATTERN (insn), str_mem);
31390 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31392 static bool
31393 is_power9_pairable_vec_type (enum attr_type type)
31395 switch (type)
31397 case TYPE_VECSIMPLE:
31398 case TYPE_VECCOMPLEX:
31399 case TYPE_VECDIV:
31400 case TYPE_VECCMP:
31401 case TYPE_VECPERM:
31402 case TYPE_VECFLOAT:
31403 case TYPE_VECFDIV:
31404 case TYPE_VECDOUBLE:
31405 return true;
31406 default:
31407 break;
31409 return false;
31412 /* Returns whether the dependence between INSN and NEXT is considered
31413 costly by the given target. */
31415 static bool
31416 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31418 rtx insn;
31419 rtx next;
31420 rtx load_mem, str_mem;
31422 /* If the flag is not enabled - no dependence is considered costly;
31423 allow all dependent insns in the same group.
31424 This is the most aggressive option. */
31425 if (rs6000_sched_costly_dep == no_dep_costly)
31426 return false;
31428 /* If the flag is set to 1 - a dependence is always considered costly;
31429 do not allow dependent instructions in the same group.
31430 This is the most conservative option. */
31431 if (rs6000_sched_costly_dep == all_deps_costly)
31432 return true;
31434 insn = DEP_PRO (dep);
31435 next = DEP_CON (dep);
31437 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31438 && is_load_insn (next, &load_mem)
31439 && is_store_insn (insn, &str_mem))
31440 /* Prevent load after store in the same group. */
31441 return true;
31443 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31444 && is_load_insn (next, &load_mem)
31445 && is_store_insn (insn, &str_mem)
31446 && DEP_TYPE (dep) == REG_DEP_TRUE
31447 && mem_locations_overlap(str_mem, load_mem))
31448 /* Prevent load after store in the same group if it is a true
31449 dependence. */
31450 return true;
31452 /* The flag is set to X; dependences with latency >= X are considered costly,
31453 and will not be scheduled in the same group. */
31454 if (rs6000_sched_costly_dep <= max_dep_latency
31455 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31456 return true;
31458 return false;
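/* Illustrative sketch (not part of GCC): the -msched-costly-dep policy
   checks above, flattened into one predicate.  The enumerators and their
   values below are invented stand-ins laid out so that the named policies
   sit above the latency ceiling; any numeric policy value at or below the
   ceiling acts as a "latency >= X" threshold, as in the code above.  */
#if 0
#include <stdbool.h>

enum costly_dep_policy
{
  POLICY_MAX_LATENCY = 1000,	/* values 1..1000: latency threshold */
  POLICY_NO_DEP_COSTLY,		/* nothing is costly */
  POLICY_ALL_DEPS_COSTLY,	/* everything is costly */
  POLICY_TRUE_STORE_TO_LOAD,	/* overlapping true store->load deps */
  POLICY_STORE_TO_LOAD		/* any store->load dep */
};

static bool
dep_is_costly (int policy, bool store_feeds_load, bool true_dep_overlap,
	       int cost, int distance)
{
  if (policy == POLICY_NO_DEP_COSTLY)
    return false;
  if (policy == POLICY_ALL_DEPS_COSTLY)
    return true;
  if (policy == POLICY_STORE_TO_LOAD && store_feeds_load)
    return true;
  if (policy == POLICY_TRUE_STORE_TO_LOAD && true_dep_overlap)
    return true;
  return policy <= POLICY_MAX_LATENCY && cost - distance >= policy;
}
#endif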
31461 /* Return the next insn after INSN that is found before TAIL is reached,
31462 skipping any "non-active" insns - insns that will not actually occupy
31463 an issue slot. Return NULL_RTX if such an insn is not found. */
31465 static rtx_insn *
31466 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31468 if (insn == NULL_RTX || insn == tail)
31469 return NULL;
31471 while (1)
31473 insn = NEXT_INSN (insn);
31474 if (insn == NULL_RTX || insn == tail)
31475 return NULL;
31477 if (CALL_P (insn)
31478 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31479 || (NONJUMP_INSN_P (insn)
31480 && GET_CODE (PATTERN (insn)) != USE
31481 && GET_CODE (PATTERN (insn)) != CLOBBER
31482 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31483 break;
31485 return insn;
31488 /* Do Power9-specific sched_reorder2 reordering of the ready list. */
31490 static int
31491 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31493 int pos;
31494 int i;
31495 rtx_insn *tmp;
31496 enum attr_type type, type2;
31498 type = get_attr_type (last_scheduled_insn);
31500 /* Try to issue fixed point divides back-to-back in pairs so they will be
31501 routed to separate execution units and execute in parallel. */
31502 if (type == TYPE_DIV && divide_cnt == 0)
31504 /* First divide has been scheduled. */
31505 divide_cnt = 1;
31507 /* Scan the ready list looking for another divide, if found move it
31508 to the end of the list so it is chosen next. */
31509 pos = lastpos;
31510 while (pos >= 0)
31512 if (recog_memoized (ready[pos]) >= 0
31513 && get_attr_type (ready[pos]) == TYPE_DIV)
31515 tmp = ready[pos];
31516 for (i = pos; i < lastpos; i++)
31517 ready[i] = ready[i + 1];
31518 ready[lastpos] = tmp;
31519 break;
31521 pos--;
31524 else
31526 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31527 divide_cnt = 0;
31529 /* The best dispatch throughput for vector and vector load insns can be
31530 achieved by interleaving a vector and vector load such that they'll
31531 dispatch to the same superslice. If this pairing cannot be achieved
31532 then it is best to pair vector insns together and vector load insns
31533 together.
31535 To aid in this pairing, vec_pairing maintains the current state with
31536 the following values:
31538 0 : Initial state, no vecload/vector pairing has been started.
31540 1 : A vecload or vector insn has been issued and a candidate for
31541 pairing has been found and moved to the end of the ready
31542 list. */
31543 if (type == TYPE_VECLOAD)
31545 /* Issued a vecload. */
31546 if (vec_pairing == 0)
31548 int vecload_pos = -1;
31549 /* We issued a single vecload, look for a vector insn to pair it
31550 with. If one isn't found, try to pair another vecload. */
31551 pos = lastpos;
31552 while (pos >= 0)
31554 if (recog_memoized (ready[pos]) >= 0)
31556 type2 = get_attr_type (ready[pos]);
31557 if (is_power9_pairable_vec_type (type2))
31559 /* Found a vector insn to pair with, move it to the
31560 end of the ready list so it is scheduled next. */
31561 tmp = ready[pos];
31562 for (i = pos; i < lastpos; i++)
31563 ready[i] = ready[i + 1];
31564 ready[lastpos] = tmp;
31565 vec_pairing = 1;
31566 return cached_can_issue_more;
31568 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31569 /* Remember position of first vecload seen. */
31570 vecload_pos = pos;
31572 pos--;
31574 if (vecload_pos >= 0)
31576 /* Didn't find a vector to pair with but did find a vecload,
31577 move it to the end of the ready list. */
31578 tmp = ready[vecload_pos];
31579 for (i = vecload_pos; i < lastpos; i++)
31580 ready[i] = ready[i + 1];
31581 ready[lastpos] = tmp;
31582 vec_pairing = 1;
31583 return cached_can_issue_more;
31587 else if (is_power9_pairable_vec_type (type))
31589 /* Issued a vector operation. */
31590 if (vec_pairing == 0)
31592 int vec_pos = -1;
31593 /* We issued a single vector insn, look for a vecload to pair it
31594 with. If one isn't found, try to pair another vector. */
31595 pos = lastpos;
31596 while (pos >= 0)
31598 if (recog_memoized (ready[pos]) >= 0)
31600 type2 = get_attr_type (ready[pos]);
31601 if (type2 == TYPE_VECLOAD)
31603 /* Found a vecload insn to pair with, move it to the
31604 end of the ready list so it is scheduled next. */
31605 tmp = ready[pos];
31606 for (i = pos; i < lastpos; i++)
31607 ready[i] = ready[i + 1];
31608 ready[lastpos] = tmp;
31609 vec_pairing = 1;
31610 return cached_can_issue_more;
31612 else if (is_power9_pairable_vec_type (type2)
31613 && vec_pos == -1)
31614 /* Remember position of first vector insn seen. */
31615 vec_pos = pos;
31617 pos--;
31619 if (vec_pos >= 0)
31621 /* Didn't find a vecload to pair with but did find a vector
31622 insn, move it to the end of the ready list. */
31623 tmp = ready[vec_pos];
31624 for (i = vec_pos; i < lastpos; i++)
31625 ready[i] = ready[i + 1];
31626 ready[lastpos] = tmp;
31627 vec_pairing = 1;
31628 return cached_can_issue_more;
31633 /* We've either finished a vec/vecload pair, couldn't find an insn to
31634 continue the current pair, or the last insn had nothing to do
31635 with pairing. In any case, reset the state. */
31636 vec_pairing = 0;
31639 return cached_can_issue_more;
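/* Illustrative sketch (not part of GCC): the ready-list rotation used
   throughout power9_sched_reorder2 above.  The scheduler picks insns from
   the tail of the READY array, so rotating a candidate into the last slot
   makes it the next choice while keeping the relative order of everything
   else.  The helper name is invented.  */
#if 0
static void
rotate_to_tail (rtx_insn **ready, int pos, int lastpos)
{
  rtx_insn *tmp = ready[pos];
  for (int i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];
  ready[lastpos] = tmp;
}
#endif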
31642 /* We are about to begin issuing insns for this clock cycle. */
31644 static int
31645 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31646 rtx_insn **ready ATTRIBUTE_UNUSED,
31647 int *pn_ready ATTRIBUTE_UNUSED,
31648 int clock_var ATTRIBUTE_UNUSED)
31650 int n_ready = *pn_ready;
31652 if (sched_verbose)
31653 fprintf (dump, "// rs6000_sched_reorder :\n");
31655 /* Reorder the ready list, if the insn that would be issued next
31656 is a nonpipelined insn. */
31657 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
31659 if (is_nonpipeline_insn (ready[n_ready - 1])
31660 && (recog_memoized (ready[n_ready - 2]) > 0))
31661 /* Simply swap first two insns. */
31662 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31665 if (rs6000_cpu == PROCESSOR_POWER6)
31666 load_store_pendulum = 0;
31668 return rs6000_issue_rate ();
31671 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31673 static int
31674 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31675 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31677 if (sched_verbose)
31678 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31680 /* For Power6, we need to handle some special cases to try and keep the
31681 store queue from overflowing and triggering expensive flushes.
31683 This code monitors how load and store instructions are being issued
31684 and skews the ready list one way or the other to increase the likelihood
31685 that a desired instruction is issued at the proper time.
31687 To do this, we maintain a "load_store_pendulum" to track the
31688 current state of load/store issue.
31690 - If the pendulum is at zero, then no loads or stores have been
31691 issued in the current cycle so we do nothing.
31693 - If the pendulum is 1, then a single load has been issued in this
31694 cycle and we attempt to locate another load in the ready list to
31695 issue with it.
31697 - If the pendulum is -2, then two stores have already been
31698 issued in this cycle, so we increase the priority of the first load
31699 in the ready list to increase its likelihood of being chosen first
31700 in the next cycle.
31702 - If the pendulum is -1, then a single store has been issued in this
31703 cycle and we attempt to locate another store in the ready list to
31704 issue with it, preferring a store to an adjacent memory location to
31705 facilitate store pairing in the store queue.
31707 - If the pendulum is 2, then two loads have already been
31708 issued in this cycle, so we increase the priority of the first store
31709 in the ready list to increase its likelihood of being chosen first
31710 in the next cycle.
31712 - If the pendulum < -2 or > 2, then do nothing.
31714 Note: This code covers the most common scenarios. There exist
31715 non-load/store instructions which make use of the LSU and which
31716 would need to be accounted for to strictly model the behavior
31717 of the machine. Those instructions are currently unaccounted
31718 for to help minimize the compile time overhead of this code. */
31720 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
31722 int pos;
31723 int i;
31724 rtx_insn *tmp;
31725 rtx load_mem, str_mem;
31727 if (is_store_insn (last_scheduled_insn, &str_mem))
31728 /* Issuing a store, swing the load_store_pendulum to the left */
31729 load_store_pendulum--;
31730 else if (is_load_insn (last_scheduled_insn, &load_mem))
31731 /* Issuing a load, swing the load_store_pendulum to the right */
31732 load_store_pendulum++;
31733 else
31734 return cached_can_issue_more;
31736 /* If the pendulum is balanced, or there is only one instruction on
31737 the ready list, then all is well, so return. */
31738 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31739 return cached_can_issue_more;
31741 if (load_store_pendulum == 1)
31743 /* A load has been issued in this cycle. Scan the ready list
31744 for another load to issue with it */
31745 pos = *pn_ready-1;
31747 while (pos >= 0)
31749 if (is_load_insn (ready[pos], &load_mem))
31751 /* Found a load. Move it to the head of the ready list,
31752 and adjust its priority so that it is more likely to
31753 stay there */
31754 tmp = ready[pos];
31755 for (i=pos; i<*pn_ready-1; i++)
31756 ready[i] = ready[i + 1];
31757 ready[*pn_ready-1] = tmp;
31759 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31760 INSN_PRIORITY (tmp)++;
31761 break;
31763 pos--;
31766 else if (load_store_pendulum == -2)
31768 /* Two stores have been issued in this cycle. Increase the
31769 priority of the first load in the ready list to favor it for
31770 issuing in the next cycle. */
31771 pos = *pn_ready-1;
31773 while (pos >= 0)
31775 if (is_load_insn (ready[pos], &load_mem)
31776 && !sel_sched_p ()
31777 && INSN_PRIORITY_KNOWN (ready[pos]))
31779 INSN_PRIORITY (ready[pos])++;
31781 /* Adjust the pendulum to account for the fact that a load
31782 was found and increased in priority. This is to prevent
31783 increasing the priority of multiple loads */
31784 load_store_pendulum--;
31786 break;
31788 pos--;
31791 else if (load_store_pendulum == -1)
31793 /* A store has been issued in this cycle. Scan the ready list for
31794 another store to issue with it, preferring a store to an adjacent
31795 memory location */
31796 int first_store_pos = -1;
31798 pos = *pn_ready-1;
31800 while (pos >= 0)
31802 if (is_store_insn (ready[pos], &str_mem))
31804 rtx str_mem2;
31805 /* Maintain the index of the first store found on the
31806 list */
31807 if (first_store_pos == -1)
31808 first_store_pos = pos;
31810 if (is_store_insn (last_scheduled_insn, &str_mem2)
31811 && adjacent_mem_locations (str_mem, str_mem2))
31813 /* Found an adjacent store. Move it to the head of the
31814 ready list, and adjust its priority so that it is
31815 more likely to stay there */
31816 tmp = ready[pos];
31817 for (i=pos; i<*pn_ready-1; i++)
31818 ready[i] = ready[i + 1];
31819 ready[*pn_ready-1] = tmp;
31821 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31822 INSN_PRIORITY (tmp)++;
31824 first_store_pos = -1;
31826 break;
31829 pos--;
31832 if (first_store_pos >= 0)
31834 /* An adjacent store wasn't found, but a non-adjacent store was,
31835 so move the non-adjacent store to the front of the ready
31836 list, and adjust its priority so that it is more likely to
31837 stay there. */
31838 tmp = ready[first_store_pos];
31839 for (i=first_store_pos; i<*pn_ready-1; i++)
31840 ready[i] = ready[i + 1];
31841 ready[*pn_ready-1] = tmp;
31842 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31843 INSN_PRIORITY (tmp)++;
31846 else if (load_store_pendulum == 2)
31848 /* Two loads have been issued in this cycle. Increase the priority
31849 of the first store in the ready list to favor it for issuing in
31850 the next cycle. */
31851 pos = *pn_ready-1;
31853 while (pos >= 0)
31855 if (is_store_insn (ready[pos], &str_mem)
31856 && !sel_sched_p ()
31857 && INSN_PRIORITY_KNOWN (ready[pos]))
31859 INSN_PRIORITY (ready[pos])++;
31861 /* Adjust the pendulum to account for the fact that a store
31862 was found and increased in priority. This is to prevent
31863 increasing the priority of multiple stores */
31864 load_store_pendulum++;
31866 break;
31868 pos--;
31873 /* Do Power9 dependent reordering if necessary. */
31874 if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
31875 && recog_memoized (last_scheduled_insn) >= 0)
31876 return power9_sched_reorder2 (ready, *pn_ready - 1);
31878 return cached_can_issue_more;
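/* Illustrative sketch (not part of GCC): one cycle of the Power6 load/store
   pendulum above.  The pendulum is reset to 0 at the start of each cycle;
   every issued load swings it right (+1) and every store left (-1), and the
   magic values trigger the ready-list adjustments:

     pendulum == 1    look for a second load to pair with this one
     pendulum == -1   look for a second store, preferring an adjacent one
     pendulum == 2    boost the first ready store, then step to 3 so only
		      one store is boosted
     pendulum == -2   boost the first ready load, then step to -3
     |pendulum| > 2   do nothing  */
#if 0
#include <stdbool.h>

static int
swing_pendulum (int pendulum, bool issued_store)
{
  return issued_store ? pendulum - 1 : pendulum + 1;
}
#endif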
31881 /* Return whether the presence of INSN causes a dispatch group termination
31882 of group WHICH_GROUP.
31884 If WHICH_GROUP == current_group, this function will return true if INSN
31885 causes the termination of the current group (i.e, the dispatch group to
31886 which INSN belongs). This means that INSN will be the last insn in the
31887 group it belongs to.
31889 If WHICH_GROUP == previous_group, this function will return true if INSN
31890 causes the termination of the previous group (i.e, the dispatch group that
31891 precedes the group to which INSN belongs). This means that INSN will be
31892 the first insn in the group it belongs to. */
31894 static bool
31895 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31897 bool first, last;
31899 if (! insn)
31900 return false;
31902 first = insn_must_be_first_in_group (insn);
31903 last = insn_must_be_last_in_group (insn);
31905 if (first && last)
31906 return true;
31908 if (which_group == current_group)
31909 return last;
31910 else if (which_group == previous_group)
31911 return first;
31913 return false;
31917 static bool
31918 insn_must_be_first_in_group (rtx_insn *insn)
31920 enum attr_type type;
31922 if (!insn
31923 || NOTE_P (insn)
31924 || DEBUG_INSN_P (insn)
31925 || GET_CODE (PATTERN (insn)) == USE
31926 || GET_CODE (PATTERN (insn)) == CLOBBER)
31927 return false;
31929 switch (rs6000_cpu)
31931 case PROCESSOR_POWER5:
31932 if (is_cracked_insn (insn))
31933 return true;
31934 /* FALLTHRU */
31935 case PROCESSOR_POWER4:
31936 if (is_microcoded_insn (insn))
31937 return true;
31939 if (!rs6000_sched_groups)
31940 return false;
31942 type = get_attr_type (insn);
31944 switch (type)
31946 case TYPE_MFCR:
31947 case TYPE_MFCRF:
31948 case TYPE_MTCR:
31949 case TYPE_DELAYED_CR:
31950 case TYPE_CR_LOGICAL:
31951 case TYPE_MTJMPR:
31952 case TYPE_MFJMPR:
31953 case TYPE_DIV:
31954 case TYPE_LOAD_L:
31955 case TYPE_STORE_C:
31956 case TYPE_ISYNC:
31957 case TYPE_SYNC:
31958 return true;
31959 default:
31960 break;
31962 break;
31963 case PROCESSOR_POWER6:
31964 type = get_attr_type (insn);
31966 switch (type)
31968 case TYPE_EXTS:
31969 case TYPE_CNTLZ:
31970 case TYPE_TRAP:
31971 case TYPE_MUL:
31972 case TYPE_INSERT:
31973 case TYPE_FPCOMPARE:
31974 case TYPE_MFCR:
31975 case TYPE_MTCR:
31976 case TYPE_MFJMPR:
31977 case TYPE_MTJMPR:
31978 case TYPE_ISYNC:
31979 case TYPE_SYNC:
31980 case TYPE_LOAD_L:
31981 case TYPE_STORE_C:
31982 return true;
31983 case TYPE_SHIFT:
31984 if (get_attr_dot (insn) == DOT_NO
31985 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31986 return true;
31987 else
31988 break;
31989 case TYPE_DIV:
31990 if (get_attr_size (insn) == SIZE_32)
31991 return true;
31992 else
31993 break;
31994 case TYPE_LOAD:
31995 case TYPE_STORE:
31996 case TYPE_FPLOAD:
31997 case TYPE_FPSTORE:
31998 if (get_attr_update (insn) == UPDATE_YES)
31999 return true;
32000 else
32001 break;
32002 default:
32003 break;
32005 break;
32006 case PROCESSOR_POWER7:
32007 type = get_attr_type (insn);
32009 switch (type)
32011 case TYPE_CR_LOGICAL:
32012 case TYPE_MFCR:
32013 case TYPE_MFCRF:
32014 case TYPE_MTCR:
32015 case TYPE_DIV:
32016 case TYPE_ISYNC:
32017 case TYPE_LOAD_L:
32018 case TYPE_STORE_C:
32019 case TYPE_MFJMPR:
32020 case TYPE_MTJMPR:
32021 return true;
32022 case TYPE_MUL:
32023 case TYPE_SHIFT:
32024 case TYPE_EXTS:
32025 if (get_attr_dot (insn) == DOT_YES)
32026 return true;
32027 else
32028 break;
32029 case TYPE_LOAD:
32030 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32031 || get_attr_update (insn) == UPDATE_YES)
32032 return true;
32033 else
32034 break;
32035 case TYPE_STORE:
32036 case TYPE_FPLOAD:
32037 case TYPE_FPSTORE:
32038 if (get_attr_update (insn) == UPDATE_YES)
32039 return true;
32040 else
32041 break;
32042 default:
32043 break;
32045 break;
32046 case PROCESSOR_POWER8:
32047 type = get_attr_type (insn);
32049 switch (type)
32051 case TYPE_CR_LOGICAL:
32052 case TYPE_DELAYED_CR:
32053 case TYPE_MFCR:
32054 case TYPE_MFCRF:
32055 case TYPE_MTCR:
32056 case TYPE_SYNC:
32057 case TYPE_ISYNC:
32058 case TYPE_LOAD_L:
32059 case TYPE_STORE_C:
32060 case TYPE_VECSTORE:
32061 case TYPE_MFJMPR:
32062 case TYPE_MTJMPR:
32063 return true;
32064 case TYPE_SHIFT:
32065 case TYPE_EXTS:
32066 case TYPE_MUL:
32067 if (get_attr_dot (insn) == DOT_YES)
32068 return true;
32069 else
32070 break;
32071 case TYPE_LOAD:
32072 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32073 || get_attr_update (insn) == UPDATE_YES)
32074 return true;
32075 else
32076 break;
32077 case TYPE_STORE:
32078 if (get_attr_update (insn) == UPDATE_YES
32079 && get_attr_indexed (insn) == INDEXED_YES)
32080 return true;
32081 else
32082 break;
32083 default:
32084 break;
32086 break;
32087 default:
32088 break;
32091 return false;
32094 static bool
32095 insn_must_be_last_in_group (rtx_insn *insn)
32097 enum attr_type type;
32099 if (!insn
32100 || NOTE_P (insn)
32101 || DEBUG_INSN_P (insn)
32102 || GET_CODE (PATTERN (insn)) == USE
32103 || GET_CODE (PATTERN (insn)) == CLOBBER)
32104 return false;
32106 switch (rs6000_cpu) {
32107 case PROCESSOR_POWER4:
32108 case PROCESSOR_POWER5:
32109 if (is_microcoded_insn (insn))
32110 return true;
32112 if (is_branch_slot_insn (insn))
32113 return true;
32115 break;
32116 case PROCESSOR_POWER6:
32117 type = get_attr_type (insn);
32119 switch (type)
32121 case TYPE_EXTS:
32122 case TYPE_CNTLZ:
32123 case TYPE_TRAP:
32124 case TYPE_MUL:
32125 case TYPE_FPCOMPARE:
32126 case TYPE_MFCR:
32127 case TYPE_MTCR:
32128 case TYPE_MFJMPR:
32129 case TYPE_MTJMPR:
32130 case TYPE_ISYNC:
32131 case TYPE_SYNC:
32132 case TYPE_LOAD_L:
32133 case TYPE_STORE_C:
32134 return true;
32135 case TYPE_SHIFT:
32136 if (get_attr_dot (insn) == DOT_NO
32137 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
32138 return true;
32139 else
32140 break;
32141 case TYPE_DIV:
32142 if (get_attr_size (insn) == SIZE_32)
32143 return true;
32144 else
32145 break;
32146 default:
32147 break;
32149 break;
32150 case PROCESSOR_POWER7:
32151 type = get_attr_type (insn);
32153 switch (type)
32155 case TYPE_ISYNC:
32156 case TYPE_SYNC:
32157 case TYPE_LOAD_L:
32158 case TYPE_STORE_C:
32159 return true;
32160 case TYPE_LOAD:
32161 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32162 && get_attr_update (insn) == UPDATE_YES)
32163 return true;
32164 else
32165 break;
32166 case TYPE_STORE:
32167 if (get_attr_update (insn) == UPDATE_YES
32168 && get_attr_indexed (insn) == INDEXED_YES)
32169 return true;
32170 else
32171 break;
32172 default:
32173 break;
32175 break;
32176 case PROCESSOR_POWER8:
32177 type = get_attr_type (insn);
32179 switch (type)
32181 case TYPE_MFCR:
32182 case TYPE_MTCR:
32183 case TYPE_ISYNC:
32184 case TYPE_SYNC:
32185 case TYPE_LOAD_L:
32186 case TYPE_STORE_C:
32187 return true;
32188 case TYPE_LOAD:
32189 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32190 && get_attr_update (insn) == UPDATE_YES)
32191 return true;
32192 else
32193 break;
32194 case TYPE_STORE:
32195 if (get_attr_update (insn) == UPDATE_YES
32196 && get_attr_indexed (insn) == INDEXED_YES)
32197 return true;
32198 else
32199 break;
32200 default:
32201 break;
32203 break;
32204 default:
32205 break;
32208 return false;
32211 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
32212 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
32214 static bool
32215 is_costly_group (rtx *group_insns, rtx next_insn)
32217 int i;
32218 int issue_rate = rs6000_issue_rate ();
32220 for (i = 0; i < issue_rate; i++)
32222 sd_iterator_def sd_it;
32223 dep_t dep;
32224 rtx insn = group_insns[i];
32226 if (!insn)
32227 continue;
32229 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
32231 rtx next = DEP_CON (dep);
32233 if (next == next_insn
32234 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
32235 return true;
32239 return false;
32242 /* Utility function for redefine_groups.
32243 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
32244 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
32245 to keep it "far" (in a separate group) from GROUP_INSNS, following
32246 one of the following schemes, depending on the value of the flag
32247 -minsert-sched-nops = X:
32248 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
32249 in order to force NEXT_INSN into a separate group.
32250 (2) X < sched_finish_regroup_exact: insert exactly X nops.
32251 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
32252 insertion (has a group just ended, how many vacant issue slots remain in the
32253 last group, and how many dispatch groups were encountered so far). */
32255 static int
32256 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
32257 rtx_insn *next_insn, bool *group_end, int can_issue_more,
32258 int *group_count)
32260 rtx nop;
32261 bool force;
32262 int issue_rate = rs6000_issue_rate ();
32263 bool end = *group_end;
32264 int i;
32266 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
32267 return can_issue_more;
32269 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
32270 return can_issue_more;
32272 force = is_costly_group (group_insns, next_insn);
32273 if (!force)
32274 return can_issue_more;
32276 if (sched_verbose > 6)
32277 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
32278 *group_count ,can_issue_more);
32280 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
32282 if (*group_end)
32283 can_issue_more = 0;
32285 /* Since only a branch can be issued in the last issue_slot, it is
32286 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
32287 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
32288 in this case the last nop will start a new group and the branch
32289 will be forced to the new group. */
32290 if (can_issue_more && !is_branch_slot_insn (next_insn))
32291 can_issue_more--;
32293 /* Do we have a special group ending nop? */
32294 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
32295 || rs6000_cpu_attr == CPU_POWER8)
32297 nop = gen_group_ending_nop ();
32298 emit_insn_before (nop, next_insn);
32299 can_issue_more = 0;
32301 else
32302 while (can_issue_more > 0)
32304 nop = gen_nop ();
32305 emit_insn_before (nop, next_insn);
32306 can_issue_more--;
32309 *group_end = true;
32310 return 0;
32313 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
32315 int n_nops = rs6000_sched_insert_nops;
32317 /* Nops can't be issued from the branch slot, so the effective
32318 issue_rate for nops is 'issue_rate - 1'. */
32319 if (can_issue_more == 0)
32320 can_issue_more = issue_rate;
32321 can_issue_more--;
32322 if (can_issue_more == 0)
32324 can_issue_more = issue_rate - 1;
32325 (*group_count)++;
32326 end = true;
32327 for (i = 0; i < issue_rate; i++)
32329 group_insns[i] = 0;
32333 while (n_nops > 0)
32335 nop = gen_nop ();
32336 emit_insn_before (nop, next_insn);
32337 if (can_issue_more == issue_rate - 1) /* new group begins */
32338 end = false;
32339 can_issue_more--;
32340 if (can_issue_more == 0)
32342 can_issue_more = issue_rate - 1;
32343 (*group_count)++;
32344 end = true;
32345 for (i = 0; i < issue_rate; i++)
32347 group_insns[i] = 0;
32350 n_nops--;
32353 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32354 can_issue_more++;
32356 /* Is next_insn going to start a new group? */
32357 *group_end
32358 = (end
32359 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32360 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32361 || (can_issue_more < issue_rate &&
32362 insn_terminates_group_p (next_insn, previous_group)));
32363 if (*group_end && end)
32364 (*group_count)--;
32366 if (sched_verbose > 6)
32367 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
32368 *group_count, can_issue_more);
32369 return can_issue_more;
32372 return can_issue_more;
32375 /* This function tries to synch the dispatch groups that the compiler "sees"
32376 with the dispatch groups that the processor dispatcher is expected to
32377 form in practice. It tries to achieve this synchronization by forcing the
32378 estimated processor grouping on the compiler (as opposed to the function
32379 'pad_groups' which tries to force the scheduler's grouping on the processor).
32381 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32382 examines the (estimated) dispatch groups that will be formed by the processor
32383 dispatcher. It marks these group boundaries to reflect the estimated
32384 processor grouping, overriding the grouping that the scheduler had marked.
32385 Depending on the value of the flag '-minsert-sched-nops' this function can
32386 force certain insns into separate groups or force a certain distance between
32387 them by inserting nops, for example, if there exists a "costly dependence"
32388 between the insns.
32390 The function estimates the group boundaries that the processor will form as
32391 follows: It keeps track of how many vacant issue slots are available after
32392 each insn. A subsequent insn will start a new group if one of the following
32393 4 cases applies:
32394 - no more vacant issue slots remain in the current dispatch group.
32395 - only the last issue slot, which is the branch slot, is vacant, but the next
32396 insn is not a branch.
32397 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32398 which means that a cracked insn (which occupies two issue slots) can't be
32399 issued in this group.
32400 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
32401 start a new group. */
32403 static int
32404 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32405 rtx_insn *tail)
32407 rtx_insn *insn, *next_insn;
32408 int issue_rate;
32409 int can_issue_more;
32410 int slot, i;
32411 bool group_end;
32412 int group_count = 0;
32413 rtx *group_insns;
32415 /* Initialize. */
32416 issue_rate = rs6000_issue_rate ();
32417 group_insns = XALLOCAVEC (rtx, issue_rate);
32418 for (i = 0; i < issue_rate; i++)
32420 group_insns[i] = 0;
32422 can_issue_more = issue_rate;
32423 slot = 0;
32424 insn = get_next_active_insn (prev_head_insn, tail);
32425 group_end = false;
32427 while (insn != NULL_RTX)
32429 slot = (issue_rate - can_issue_more);
32430 group_insns[slot] = insn;
32431 can_issue_more =
32432 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32433 if (insn_terminates_group_p (insn, current_group))
32434 can_issue_more = 0;
32436 next_insn = get_next_active_insn (insn, tail);
32437 if (next_insn == NULL_RTX)
32438 return group_count + 1;
32440 /* Is next_insn going to start a new group? */
32441 group_end
32442 = (can_issue_more == 0
32443 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32444 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32445 || (can_issue_more < issue_rate &&
32446 insn_terminates_group_p (next_insn, previous_group)));
32448 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32449 next_insn, &group_end, can_issue_more,
32450 &group_count);
32452 if (group_end)
32454 group_count++;
32455 can_issue_more = 0;
32456 for (i = 0; i < issue_rate; i++)
32458 group_insns[i] = 0;
32462 if (GET_MODE (next_insn) == TImode && can_issue_more)
32463 PUT_MODE (next_insn, VOIDmode);
32464 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32465 PUT_MODE (next_insn, TImode);
32467 insn = next_insn;
32468 if (can_issue_more == 0)
32469 can_issue_more = issue_rate;
32470 } /* while */
32472 return group_count;
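/* Illustrative sketch (not part of GCC): the four "next insn starts a new
   group" conditions used by redefine_groups (and force_new_group) above, as
   one predicate over the number of vacant issue slots.  The parameter names
   are invented.  */
#if 0
#include <stdbool.h>

static bool
starts_new_group (int vacant_slots, int issue_rate, bool next_is_branch,
		  bool next_is_cracked, bool next_must_be_first)
{
  return vacant_slots == 0
	 || (vacant_slots == 1 && !next_is_branch)
	 || (vacant_slots <= 2 && next_is_cracked)
	 || (vacant_slots < issue_rate && next_must_be_first);
}
#endif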
32475 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32476 dispatch group boundaries that the scheduler had marked. Pad with nops
32477 any dispatch groups which have vacant issue slots, in order to force the
32478 scheduler's grouping on the processor dispatcher. The function
32479 returns the number of dispatch groups found. */
32481 static int
32482 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32483 rtx_insn *tail)
32485 rtx_insn *insn, *next_insn;
32486 rtx nop;
32487 int issue_rate;
32488 int can_issue_more;
32489 int group_end;
32490 int group_count = 0;
32492 /* Initialize issue_rate. */
32493 issue_rate = rs6000_issue_rate ();
32494 can_issue_more = issue_rate;
32496 insn = get_next_active_insn (prev_head_insn, tail);
32497 next_insn = get_next_active_insn (insn, tail);
32499 while (insn != NULL_RTX)
32501 can_issue_more =
32502 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32504 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32506 if (next_insn == NULL_RTX)
32507 break;
32509 if (group_end)
32511 /* If the scheduler had marked group termination at this location
32512 (between insn and next_insn), and neither insn nor next_insn will
32513 force group termination, pad the group with nops to force group
32514 termination. */
32515 if (can_issue_more
32516 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32517 && !insn_terminates_group_p (insn, current_group)
32518 && !insn_terminates_group_p (next_insn, previous_group))
32520 if (!is_branch_slot_insn (next_insn))
32521 can_issue_more--;
32523 while (can_issue_more)
32525 nop = gen_nop ();
32526 emit_insn_before (nop, next_insn);
32527 can_issue_more--;
32531 can_issue_more = issue_rate;
32532 group_count++;
32535 insn = next_insn;
32536 next_insn = get_next_active_insn (insn, tail);
32539 return group_count;
32542 /* We're beginning a new block. Initialize data structures as necessary. */
32544 static void
32545 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32546 int sched_verbose ATTRIBUTE_UNUSED,
32547 int max_ready ATTRIBUTE_UNUSED)
32549 last_scheduled_insn = NULL;
32550 load_store_pendulum = 0;
32551 divide_cnt = 0;
32552 vec_pairing = 0;
32555 /* The following function is called at the end of scheduling BB.
32556 After reload, it inserts nops to enforce insn group bundling. */
32558 static void
32559 rs6000_sched_finish (FILE *dump, int sched_verbose)
32561 int n_groups;
32563 if (sched_verbose)
32564 fprintf (dump, "=== Finishing schedule.\n");
32566 if (reload_completed && rs6000_sched_groups)
32568 /* Do not run sched_finish hook when selective scheduling enabled. */
32569 if (sel_sched_p ())
32570 return;
32572 if (rs6000_sched_insert_nops == sched_finish_none)
32573 return;
32575 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32576 n_groups = pad_groups (dump, sched_verbose,
32577 current_sched_info->prev_head,
32578 current_sched_info->next_tail);
32579 else
32580 n_groups = redefine_groups (dump, sched_verbose,
32581 current_sched_info->prev_head,
32582 current_sched_info->next_tail);
32584 if (sched_verbose >= 6)
32586 fprintf (dump, "ngroups = %d\n", n_groups);
32587 print_rtl (dump, current_sched_info->prev_head);
32588 fprintf (dump, "Done finish_sched\n");
32593 struct rs6000_sched_context
32595 short cached_can_issue_more;
32596 rtx_insn *last_scheduled_insn;
32597 int load_store_pendulum;
32598 int divide_cnt;
32599 int vec_pairing;
32602 typedef struct rs6000_sched_context rs6000_sched_context_def;
32603 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32605 /* Allocate store for new scheduling context. */
32606 static void *
32607 rs6000_alloc_sched_context (void)
32609 return xmalloc (sizeof (rs6000_sched_context_def));
32612 /* If CLEAN_P is true, initialize _SC with clean data;
32613 otherwise initialize it from the global context. */
32614 static void
32615 rs6000_init_sched_context (void *_sc, bool clean_p)
32617 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32619 if (clean_p)
32621 sc->cached_can_issue_more = 0;
32622 sc->last_scheduled_insn = NULL;
32623 sc->load_store_pendulum = 0;
32624 sc->divide_cnt = 0;
32625 sc->vec_pairing = 0;
32627 else
32629 sc->cached_can_issue_more = cached_can_issue_more;
32630 sc->last_scheduled_insn = last_scheduled_insn;
32631 sc->load_store_pendulum = load_store_pendulum;
32632 sc->divide_cnt = divide_cnt;
32633 sc->vec_pairing = vec_pairing;
32637 /* Sets the global scheduling context to the one pointed to by _SC. */
32638 static void
32639 rs6000_set_sched_context (void *_sc)
32641 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32643 gcc_assert (sc != NULL);
32645 cached_can_issue_more = sc->cached_can_issue_more;
32646 last_scheduled_insn = sc->last_scheduled_insn;
32647 load_store_pendulum = sc->load_store_pendulum;
32648 divide_cnt = sc->divide_cnt;
32649 vec_pairing = sc->vec_pairing;
32652 /* Free _SC. */
32653 static void
32654 rs6000_free_sched_context (void *_sc)
32656 gcc_assert (_sc != NULL);
32658 free (_sc);
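/* Illustrative sketch (not part of GCC): the save/restore protocol behind
   the four context hooks above.  The selective scheduler allocates a
   context, snapshots the scheduling globals into it, and later reinstates
   them wholesale; the struct and names below are invented stand-ins.  */
#if 0
#include <stdlib.h>
#include <string.h>

struct sched_state { int budget, pendulum, divides, pairing; };
static struct sched_state the_globals;	/* stands in for the file-scope state */

static void *ctx_alloc (void) { return malloc (sizeof (struct sched_state)); }
static void ctx_save (void *c) { memcpy (c, &the_globals, sizeof the_globals); }
static void ctx_restore (void *c) { memcpy (&the_globals, c, sizeof the_globals); }
static void ctx_free (void *c) { free (c); }
#endif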
32661 static bool
32662 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32664 switch (get_attr_type (insn))
32666 case TYPE_DIV:
32667 case TYPE_SDIV:
32668 case TYPE_DDIV:
32669 case TYPE_VECDIV:
32670 case TYPE_SSQRT:
32671 case TYPE_DSQRT:
32672 return false;
32674 default:
32675 return true;
32679 /* Length in bytes of the trampoline for entering a nested function. */
32682 rs6000_trampoline_size (void)
32684 int ret = 0;
32686 switch (DEFAULT_ABI)
32688 default:
32689 gcc_unreachable ();
32691 case ABI_AIX:
32692 ret = (TARGET_32BIT) ? 12 : 24;
32693 break;
32695 case ABI_ELFv2:
32696 gcc_assert (!TARGET_32BIT);
32697 ret = 32;
32698 break;
32700 case ABI_DARWIN:
32701 case ABI_V4:
32702 ret = (TARGET_32BIT) ? 40 : 48;
32703 break;
32706 return ret;
32709 /* Emit RTL insns to initialize the variable parts of a trampoline.
32710 FNADDR is an RTX for the address of the function's pure code.
32711 CXT is an RTX for the static chain value for the function. */
32713 static void
32714 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32716 int regsize = (TARGET_32BIT) ? 4 : 8;
32717 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32718 rtx ctx_reg = force_reg (Pmode, cxt);
32719 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32721 switch (DEFAULT_ABI)
32723 default:
32724 gcc_unreachable ();
32726 /* Under AIX, just build the 3-word function descriptor. */
32727 case ABI_AIX:
32729 rtx fnmem, fn_reg, toc_reg;
32731 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32732 error ("you cannot take the address of a nested function if you use "
32733 "the %qs option", "-mno-pointers-to-nested-functions");
32735 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32736 fn_reg = gen_reg_rtx (Pmode);
32737 toc_reg = gen_reg_rtx (Pmode);
32739 /* Macro to shorten the code expansions below. */
32740 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32742 m_tramp = replace_equiv_address (m_tramp, addr);
32744 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32745 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32746 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32747 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32748 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32750 # undef MEM_PLUS
32752 break;
32754 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32755 case ABI_ELFv2:
32756 case ABI_DARWIN:
32757 case ABI_V4:
32758 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32759 LCT_NORMAL, VOIDmode,
32760 addr, Pmode,
32761 GEN_INT (rs6000_trampoline_size ()), SImode,
32762 fnaddr, Pmode,
32763 ctx_reg, Pmode);
32764 break;
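/* Illustrative sketch (not part of GCC): the 3-word AIX function descriptor
   assembled above, one pointer-sized word each at offsets 0, regsize and
   2*regsize of the trampoline.  The struct is an invented stand-in for
   documentation only.  */
#if 0
struct aix_func_desc
{
  void *code_addr;	/* entry point of the nested function */
  void *toc;		/* TOC pointer for the callee */
  void *static_chain;	/* static chain (environment) pointer */
};
#endif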
32769 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32770 identifier as an argument, so the front end shouldn't look it up. */
32772 static bool
32773 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32775 return is_attribute_p ("altivec", attr_id);
32778 /* Handle the "altivec" attribute. The attribute may have
32779 arguments as follows:
32781 __attribute__((altivec(vector__)))
32782 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32783 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32785 and may appear more than once (e.g., 'vector bool char') in a
32786 given declaration. */
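/* For illustration (a sketch; the keyword expansion itself is done in
   the front ends), the context-sensitive AltiVec keywords arrive here
   roughly as:

     vector int vi;        // int __attribute__((altivec(vector__)))
     vector bool int vbi;  // bool__ attribute on unsigned, plus vector__
     vector pixel vp;      // pixel__ attribute on unsigned short

   which is why the 'b' and 'p' cases below may see either the scalar
   mode or an already-vectorized mode. */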
32788 static tree
32789 rs6000_handle_altivec_attribute (tree *node,
32790 tree name ATTRIBUTE_UNUSED,
32791 tree args,
32792 int flags ATTRIBUTE_UNUSED,
32793 bool *no_add_attrs)
32795 tree type = *node, result = NULL_TREE;
32796 machine_mode mode;
32797 int unsigned_p;
32798 char altivec_type
32799 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32800 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32801 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32802 : '?');
32804 while (POINTER_TYPE_P (type)
32805 || TREE_CODE (type) == FUNCTION_TYPE
32806 || TREE_CODE (type) == METHOD_TYPE
32807 || TREE_CODE (type) == ARRAY_TYPE)
32808 type = TREE_TYPE (type);
32810 mode = TYPE_MODE (type);
32812 /* Check for invalid AltiVec type qualifiers. */
32813 if (type == long_double_type_node)
32814 error ("use of %<long double%> in AltiVec types is invalid");
32815 else if (type == boolean_type_node)
32816 error ("use of boolean types in AltiVec types is invalid");
32817 else if (TREE_CODE (type) == COMPLEX_TYPE)
32818 error ("use of %<complex%> in AltiVec types is invalid");
32819 else if (DECIMAL_FLOAT_MODE_P (mode))
32820 error ("use of decimal floating point types in AltiVec types is invalid");
32821 else if (!TARGET_VSX)
32823 if (type == long_unsigned_type_node || type == long_integer_type_node)
32825 if (TARGET_64BIT)
32826 error ("use of %<long%> in AltiVec types is invalid for "
32827 "64-bit code without %qs", "-mvsx");
32828 else if (rs6000_warn_altivec_long)
32829 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32830 "use %<int%>");
32832 else if (type == long_long_unsigned_type_node
32833 || type == long_long_integer_type_node)
32834 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32835 "-mvsx");
32836 else if (type == double_type_node)
32837 error ("use of %<double%> in AltiVec types is invalid without %qs",
32838 "-mvsx");
32841 switch (altivec_type)
32843 case 'v':
32844 unsigned_p = TYPE_UNSIGNED (type);
32845 switch (mode)
32847 case E_TImode:
32848 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32849 break;
32850 case E_DImode:
32851 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32852 break;
32853 case E_SImode:
32854 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32855 break;
32856 case E_HImode:
32857 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32858 break;
32859 case E_QImode:
32860 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32861 break;
32862 case E_SFmode: result = V4SF_type_node; break;
32863 case E_DFmode: result = V2DF_type_node; break;
32864 /* If the user says 'vector int bool', we may be handed the 'bool'
32865 attribute _before_ the 'vector' attribute, and so select the
32866 proper type in the 'b' case below. */
32867 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32868 case E_V2DImode: case E_V2DFmode:
32869 result = type;
32870 default: break;
32872 break;
32873 case 'b':
32874 switch (mode)
32876 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32877 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32878 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32879 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32880 default: break;
32882 break;
32883 case 'p':
32884 switch (mode)
32886 case E_V8HImode: result = pixel_V8HI_type_node;
32887 default: break;
32889 default: break;
32892 /* Propagate qualifiers attached to the element type
32893 onto the vector type. */
32894 if (result && result != type && TYPE_QUALS (type))
32895 result = build_qualified_type (result, TYPE_QUALS (type));
32897 *no_add_attrs = true; /* No need to hang on to the attribute. */
32899 if (result)
32900 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32902 return NULL_TREE;
32905 /* AltiVec defines four built-in scalar types that serve as vector
32906 elements; we must teach the compiler how to mangle them. */
32908 static const char *
32909 rs6000_mangle_type (const_tree type)
32911 type = TYPE_MAIN_VARIANT (type);
32913 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32914 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32915 return NULL;
32917 if (type == bool_char_type_node) return "U6__boolc";
32918 if (type == bool_short_type_node) return "U6__bools";
32919 if (type == pixel_type_node) return "u7__pixel";
32920 if (type == bool_int_type_node) return "U6__booli";
32921 if (type == bool_long_type_node) return "U6__booll";
32923 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32924 "g" for IBM extended double, no matter whether it is long double (using
32925 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32926 if (TARGET_FLOAT128_TYPE)
32928 if (type == ieee128_float_type_node)
32929 return "U10__float128";
32931 if (TARGET_LONG_DOUBLE_128)
32933 if (type == long_double_type_node)
32934 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
32936 if (type == ibm128_float_type_node)
32937 return "g";
32941 /* Mangle IBM extended float long double as `g' (__float128) on
32942 powerpc*-linux where long-double-64 previously was the default. */
32943 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
32944 && TARGET_ELF
32945 && TARGET_LONG_DOUBLE_128
32946 && !TARGET_IEEEQUAD)
32947 return "g";
32949 /* For all other types, use normal C++ mangling. */
32950 return NULL;
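/* For illustration (an assumed encoding sketch): given the table above,
   a declaration such as

     void f (vector bool int);

   mangles its parameter's element type as "U6__booli", yielding
   roughly _Z1fDv4_U6__booli once the generic Itanium ABI vector
   mangling (Dv4_) is wrapped around it. */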
32953 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32954 struct attribute_spec.handler. */
32956 static tree
32957 rs6000_handle_longcall_attribute (tree *node, tree name,
32958 tree args ATTRIBUTE_UNUSED,
32959 int flags ATTRIBUTE_UNUSED,
32960 bool *no_add_attrs)
32962 if (TREE_CODE (*node) != FUNCTION_TYPE
32963 && TREE_CODE (*node) != FIELD_DECL
32964 && TREE_CODE (*node) != TYPE_DECL)
32966 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32967 name);
32968 *no_add_attrs = true;
32971 return NULL_TREE;
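/* For illustration, the attribute this handler validates is used as

     void far_away (void) __attribute__ ((longcall));

   so that calls to FAR_AWAY are made through a register (see
   rs6000_longcall_ref below) rather than a direct `bl', whose
   displacement field only reaches +/-32MB. */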
32974 /* Set longcall attributes on all functions declared when
32975 rs6000_default_long_calls is true. */
32976 static void
32977 rs6000_set_default_type_attributes (tree type)
32979 if (rs6000_default_long_calls
32980 && (TREE_CODE (type) == FUNCTION_TYPE
32981 || TREE_CODE (type) == METHOD_TYPE))
32982 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32983 NULL_TREE,
32984 TYPE_ATTRIBUTES (type));
32986 #if TARGET_MACHO
32987 darwin_set_default_type_attributes (type);
32988 #endif
32991 /* Return a reference suitable for calling a function with the
32992 longcall attribute. */
32994 rtx
32995 rs6000_longcall_ref (rtx call_ref)
32997 const char *call_name;
32998 tree node;
33000 if (GET_CODE (call_ref) != SYMBOL_REF)
33001 return call_ref;
33003 /* System V adds '.' to the internal name, so skip any leading dots. */
33004 call_name = XSTR (call_ref, 0);
33005 if (*call_name == '.')
33007 while (*call_name == '.')
33008 call_name++;
33010 node = get_identifier (call_name);
33011 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
33014 return force_reg (Pmode, call_ref);
33017 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
33018 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
33019 #endif
33021 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
33022 struct attribute_spec.handler. */
33023 static tree
33024 rs6000_handle_struct_attribute (tree *node, tree name,
33025 tree args ATTRIBUTE_UNUSED,
33026 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
33028 tree *type = NULL;
33029 if (DECL_P (*node))
33031 if (TREE_CODE (*node) == TYPE_DECL)
33032 type = &TREE_TYPE (*node);
33034 else
33035 type = node;
33037 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
33038 || TREE_CODE (*type) == UNION_TYPE)))
33040 warning (OPT_Wattributes, "%qE attribute ignored", name);
33041 *no_add_attrs = true;
33044 else if ((is_attribute_p ("ms_struct", name)
33045 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
33046 || ((is_attribute_p ("gcc_struct", name)
33047 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
33049 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
33050 name);
33051 *no_add_attrs = true;
33054 return NULL_TREE;
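/* For illustration, a valid use of the attribute on a record type:

     struct __attribute__ ((ms_struct)) S { char c; int i : 9; };

   Naming both ms_struct and gcc_struct on the same type takes the
   "incompatible attribute ignored" path above. */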
33057 static bool
33058 rs6000_ms_bitfield_layout_p (const_tree record_type)
33060 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
33061 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
33062 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
33065 #ifdef USING_ELFOS_H
33067 /* A get_unnamed_section callback, used for switching to toc_section. */
33069 static void
33070 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33072 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33073 && TARGET_MINIMAL_TOC)
33075 if (!toc_initialized)
33077 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
33078 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33079 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
33080 fprintf (asm_out_file, "\t.tc ");
33081 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
33082 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33083 fprintf (asm_out_file, "\n");
33085 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33086 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33087 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33088 fprintf (asm_out_file, " = .+32768\n");
33089 toc_initialized = 1;
33091 else
33092 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33094 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33096 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
33097 if (!toc_initialized)
33099 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33100 toc_initialized = 1;
33103 else
33105 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33106 if (!toc_initialized)
33108 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33109 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33110 fprintf (asm_out_file, " = .+32768\n");
33111 toc_initialized = 1;
33116 /* Implement TARGET_ASM_INIT_SECTIONS. */
33118 static void
33119 rs6000_elf_asm_init_sections (void)
33121 toc_section
33122 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
33124 sdata2_section
33125 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
33126 SDATA2_SECTION_ASM_OP);
33129 /* Implement TARGET_SELECT_RTX_SECTION. */
33131 static section *
33132 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
33133 unsigned HOST_WIDE_INT align)
33135 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33136 return toc_section;
33137 else
33138 return default_elf_select_rtx_section (mode, x, align);
33141 /* For a SYMBOL_REF, set generic flags and then perform some
33142 target-specific processing.
33144 When the AIX ABI is requested on a non-AIX system, replace the
33145 function name with the real name (with a leading .) rather than the
33146 function descriptor name. This saves a lot of overriding code to
33147 read the prefixes. */
33149 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
33150 static void
33151 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
33153 default_encode_section_info (decl, rtl, first);
33155 if (first
33156 && TREE_CODE (decl) == FUNCTION_DECL
33157 && !TARGET_AIX
33158 && DEFAULT_ABI == ABI_AIX)
33160 rtx sym_ref = XEXP (rtl, 0);
33161 size_t len = strlen (XSTR (sym_ref, 0));
33162 char *str = XALLOCAVEC (char, len + 2);
33163 str[0] = '.';
33164 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
33165 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
33169 static inline bool
33170 compare_section_name (const char *section, const char *templ)
33172 int len;
33174 len = strlen (templ);
33175 return (strncmp (section, templ, len) == 0
33176 && (section[len] == 0 || section[len] == '.'));
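/* For illustration: with TEMPL ".sdata" the predicate above accepts the
   section itself and dot-suffixed variants only, e.g.

     ".sdata"     -> true
     ".sdata.foo" -> true   (section[len] == '.')
     ".sdata2"    -> false  (caught by the separate ".sdata2" check)  */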
33179 bool
33180 rs6000_elf_in_small_data_p (const_tree decl)
33182 if (rs6000_sdata == SDATA_NONE)
33183 return false;
33185 /* We want to merge strings, so we never consider them small data. */
33186 if (TREE_CODE (decl) == STRING_CST)
33187 return false;
33189 /* Functions are never in the small data area. */
33190 if (TREE_CODE (decl) == FUNCTION_DECL)
33191 return false;
33193 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
33195 const char *section = DECL_SECTION_NAME (decl);
33196 if (compare_section_name (section, ".sdata")
33197 || compare_section_name (section, ".sdata2")
33198 || compare_section_name (section, ".gnu.linkonce.s")
33199 || compare_section_name (section, ".sbss")
33200 || compare_section_name (section, ".sbss2")
33201 || compare_section_name (section, ".gnu.linkonce.sb")
33202 || strcmp (section, ".PPC.EMB.sdata0") == 0
33203 || strcmp (section, ".PPC.EMB.sbss0") == 0)
33204 return true;
33206 else
33208 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
33210 if (size > 0
33211 && size <= g_switch_value
33212 /* If it's not public, and we're not going to reference it there,
33213 there's no need to put it in the small data section. */
33214 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
33215 return true;
33218 return false;
33221 #endif /* USING_ELFOS_H */
33223 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
33225 static bool
33226 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
33228 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
33231 /* Do not place thread-local symbols refs in the object blocks. */
33233 static bool
33234 rs6000_use_blocks_for_decl_p (const_tree decl)
33236 return !DECL_THREAD_LOCAL_P (decl);
33239 /* Return a REG that occurs in ADDR with coefficient 1.
33240 ADDR can be effectively incremented by incrementing REG.
33242 r0 is special and we must not select it as an address
33243 register by this routine since our caller will try to
33244 increment the returned register via an "la" instruction. */
33246 rtx
33247 find_addr_reg (rtx addr)
33249 while (GET_CODE (addr) == PLUS)
33251 if (GET_CODE (XEXP (addr, 0)) == REG
33252 && REGNO (XEXP (addr, 0)) != 0)
33253 addr = XEXP (addr, 0);
33254 else if (GET_CODE (XEXP (addr, 1)) == REG
33255 && REGNO (XEXP (addr, 1)) != 0)
33256 addr = XEXP (addr, 1);
33257 else if (CONSTANT_P (XEXP (addr, 0)))
33258 addr = XEXP (addr, 1);
33259 else if (CONSTANT_P (XEXP (addr, 1)))
33260 addr = XEXP (addr, 0);
33261 else
33262 gcc_unreachable ();
33264 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
33265 return addr;
33268 void
33269 rs6000_fatal_bad_address (rtx op)
33271 fatal_insn ("bad address", op);
33274 #if TARGET_MACHO
33276 typedef struct branch_island_d {
33277 tree function_name;
33278 tree label_name;
33279 int line_number;
33280 } branch_island;
33283 static vec<branch_island, va_gc> *branch_islands;
33285 /* Remember to generate a branch island for far calls to the given
33286 function. */
33288 static void
33289 add_compiler_branch_island (tree label_name, tree function_name,
33290 int line_number)
33292 branch_island bi = {function_name, label_name, line_number};
33293 vec_safe_push (branch_islands, bi);
33296 /* Generate far-jump branch islands for everything recorded in
33297 branch_islands. Invoked immediately after the last instruction of
33298 the epilogue has been emitted; the branch islands must be appended
33299 to, and contiguous with, the function body. Mach-O stubs are
33300 generated in machopic_output_stub(). */
33302 static void
33303 macho_branch_islands (void)
33305 char tmp_buf[512];
33307 while (!vec_safe_is_empty (branch_islands))
33309 branch_island *bi = &branch_islands->last ();
33310 const char *label = IDENTIFIER_POINTER (bi->label_name);
33311 const char *name = IDENTIFIER_POINTER (bi->function_name);
33312 char name_buf[512];
33313 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33314 if (name[0] == '*' || name[0] == '&')
33315 strcpy (name_buf, name+1);
33316 else
33318 name_buf[0] = '_';
33319 strcpy (name_buf+1, name);
33321 strcpy (tmp_buf, "\n");
33322 strcat (tmp_buf, label);
33323 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33324 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33325 dbxout_stabd (N_SLINE, bi->line_number);
33326 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33327 if (flag_pic)
33329 if (TARGET_LINK_STACK)
33331 char name[32];
33332 get_ppc476_thunk_name (name);
33333 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
33334 strcat (tmp_buf, name);
33335 strcat (tmp_buf, "\n");
33336 strcat (tmp_buf, label);
33337 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33339 else
33341 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33342 strcat (tmp_buf, label);
33343 strcat (tmp_buf, "_pic\n");
33344 strcat (tmp_buf, label);
33345 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33348 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33349 strcat (tmp_buf, name_buf);
33350 strcat (tmp_buf, " - ");
33351 strcat (tmp_buf, label);
33352 strcat (tmp_buf, "_pic)\n");
33354 strcat (tmp_buf, "\tmtlr r0\n");
33356 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33357 strcat (tmp_buf, name_buf);
33358 strcat (tmp_buf, " - ");
33359 strcat (tmp_buf, label);
33360 strcat (tmp_buf, "_pic)\n");
33362 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33364 else
33366 strcat (tmp_buf, ":\nlis r12,hi16(");
33367 strcat (tmp_buf, name_buf);
33368 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33369 strcat (tmp_buf, name_buf);
33370 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33372 output_asm_insn (tmp_buf, 0);
33373 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33374 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33375 dbxout_stabd (N_SLINE, bi->line_number);
33376 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33377 branch_islands->pop ();
33381 /* NO_PREVIOUS_DEF checks whether the function name is already in the
33382 branch-island list. */
33384 static int
33385 no_previous_def (tree function_name)
33387 branch_island *bi;
33388 unsigned ix;
33390 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33391 if (function_name == bi->function_name)
33392 return 0;
33393 return 1;
33396 /* GET_PREV_LABEL gets the label name from the previous definition of
33397 the function. */
33399 static tree
33400 get_prev_label (tree function_name)
33402 branch_island *bi;
33403 unsigned ix;
33405 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33406 if (function_name == bi->function_name)
33407 return bi->label_name;
33408 return NULL_TREE;
33411 /* INSN is either a function call or a millicode call. It may have an
33412 unconditional jump in its delay slot.
33414 CALL_DEST is the routine we are calling. */
33416 char *
33417 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
33418 int cookie_operand_number)
33420 static char buf[256];
33421 if (darwin_emit_branch_islands
33422 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
33423 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
33425 tree labelname;
33426 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
33428 if (no_previous_def (funname))
33430 rtx label_rtx = gen_label_rtx ();
33431 char *label_buf, temp_buf[256];
33432 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
33433 CODE_LABEL_NUMBER (label_rtx));
33434 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
33435 labelname = get_identifier (label_buf);
33436 add_compiler_branch_island (labelname, funname, insn_line (insn));
33438 else
33439 labelname = get_prev_label (funname);
33441 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
33442 instruction will reach 'foo', otherwise link as 'bl L42'".
33443 "L42" should be a 'branch island', that will do a far jump to
33444 'foo'. Branch islands are generated in
33445 macho_branch_islands(). */
33446 sprintf (buf, "jbsr %%z%d,%.246s",
33447 dest_operand_number, IDENTIFIER_POINTER (labelname));
33449 else
33450 sprintf (buf, "bl %%z%d", dest_operand_number);
33451 return buf;
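/* For illustration (a sketch; the exact label spelling comes from
   ASM_GENERATE_INTERNAL_LABEL): a -mlongcall call to foo () produces
   something like

     jbsr _foo,L42

   and records (L42, _foo) so macho_branch_islands () can append the
   island later, while a short call is plain "bl _foo". */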
33454 /* Generate PIC and indirect symbol stubs. */
33456 void
33457 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33459 unsigned int length;
33460 char *symbol_name, *lazy_ptr_name;
33461 char *local_label_0;
33462 static int label = 0;
33464 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33465 symb = (*targetm.strip_name_encoding) (symb);
33468 length = strlen (symb);
33469 symbol_name = XALLOCAVEC (char, length + 32);
33470 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33472 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33473 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33475 if (flag_pic == 2)
33476 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33477 else
33478 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33480 if (flag_pic == 2)
33482 fprintf (file, "\t.align 5\n");
33484 fprintf (file, "%s:\n", stub);
33485 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33487 label++;
33488 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33489 sprintf (local_label_0, "\"L%011d$spb\"", label);
33491 fprintf (file, "\tmflr r0\n");
33492 if (TARGET_LINK_STACK)
33494 char name[32];
33495 get_ppc476_thunk_name (name);
33496 fprintf (file, "\tbl %s\n", name);
33497 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33499 else
33501 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33502 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33504 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33505 lazy_ptr_name, local_label_0);
33506 fprintf (file, "\tmtlr r0\n");
33507 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33508 (TARGET_64BIT ? "ldu" : "lwzu"),
33509 lazy_ptr_name, local_label_0);
33510 fprintf (file, "\tmtctr r12\n");
33511 fprintf (file, "\tbctr\n");
33513 else
33515 fprintf (file, "\t.align 4\n");
33517 fprintf (file, "%s:\n", stub);
33518 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33520 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33521 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33522 (TARGET_64BIT ? "ldu" : "lwzu"),
33523 lazy_ptr_name);
33524 fprintf (file, "\tmtctr r12\n");
33525 fprintf (file, "\tbctr\n");
33528 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33529 fprintf (file, "%s:\n", lazy_ptr_name);
33530 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33531 fprintf (file, "%sdyld_stub_binding_helper\n",
33532 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33535 /* Legitimize PIC addresses. If the address is already
33536 position-independent, we return ORIG. Newly generated
33537 position-independent addresses go into a reg. This is REG if nonzero,
33538 otherwise we allocate register(s) as necessary. */
33540 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
33542 rtx
33543 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33544 rtx reg)
33546 rtx base, offset;
33548 if (reg == NULL && !reload_completed)
33549 reg = gen_reg_rtx (Pmode);
33551 if (GET_CODE (orig) == CONST)
33553 rtx reg_temp;
33555 if (GET_CODE (XEXP (orig, 0)) == PLUS
33556 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33557 return orig;
33559 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33561 /* Use a different reg for the intermediate value, as
33562 it will be marked UNCHANGING. */
33563 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33564 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33565 Pmode, reg_temp);
33566 offset =
33567 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33568 Pmode, reg);
33570 if (GET_CODE (offset) == CONST_INT)
33572 if (SMALL_INT (offset))
33573 return plus_constant (Pmode, base, INTVAL (offset));
33574 else if (!reload_completed)
33575 offset = force_reg (Pmode, offset);
33576 else
33578 rtx mem = force_const_mem (Pmode, orig);
33579 return machopic_legitimize_pic_address (mem, Pmode, reg);
33582 return gen_rtx_PLUS (Pmode, base, offset);
33585 /* Fall back on generic machopic code. */
33586 return machopic_legitimize_pic_address (orig, mode, reg);
33589 /* Output a .machine directive for the Darwin assembler, and call
33590 the generic start_file routine. */
33592 static void
33593 rs6000_darwin_file_start (void)
33595 static const struct
33596 {
33597 const char *arg;
33598 const char *name;
33599 HOST_WIDE_INT if_set;
33600 } mapping[] = {
33601 { "ppc64", "ppc64", MASK_64BIT },
33602 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33603 { "power4", "ppc970", 0 },
33604 { "G5", "ppc970", 0 },
33605 { "7450", "ppc7450", 0 },
33606 { "7400", "ppc7400", MASK_ALTIVEC },
33607 { "G4", "ppc7400", 0 },
33608 { "750", "ppc750", 0 },
33609 { "740", "ppc750", 0 },
33610 { "G3", "ppc750", 0 },
33611 { "604e", "ppc604e", 0 },
33612 { "604", "ppc604", 0 },
33613 { "603e", "ppc603", 0 },
33614 { "603", "ppc603", 0 },
33615 { "601", "ppc601", 0 },
33616 { NULL, "ppc", 0 } };
33617 const char *cpu_id = "";
33618 size_t i;
33620 rs6000_file_start ();
33621 darwin_file_start ();
33623 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33625 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33626 cpu_id = rs6000_default_cpu;
33628 if (global_options_set.x_rs6000_cpu_index)
33629 cpu_id = processor_target_table[rs6000_cpu_index].name;
33631 /* Look through the mapping array. Pick the first name that either
33632 matches the argument, has a bit set in IF_SET that is also set
33633 in the target flags, or has a NULL name. */
33635 i = 0;
33636 while (mapping[i].arg != NULL
33637 && strcmp (mapping[i].arg, cpu_id) != 0
33638 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33639 i++;
33641 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
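/* For illustration: compiling with -mcpu=G4 walks the mapping table to
   the { "G4", "ppc7400", 0 } entry and emits

     .machine ppc7400

   while an argument matching nothing falls through to the terminating
   { NULL, "ppc", 0 } entry. */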
33644 #endif /* TARGET_MACHO */
33646 #if TARGET_ELF
33647 static int
33648 rs6000_elf_reloc_rw_mask (void)
33650 if (flag_pic)
33651 return 3;
33652 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33653 return 2;
33654 else
33655 return 0;
33658 /* Record an element in the table of global constructors. SYMBOL is
33659 a SYMBOL_REF of the function to be called; PRIORITY is a number
33660 between 0 and MAX_INIT_PRIORITY.
33662 This differs from default_named_section_asm_out_constructor in
33663 that we have special handling for -mrelocatable. */
33665 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33666 static void
33667 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33669 const char *section = ".ctors";
33670 char buf[18];
33672 if (priority != DEFAULT_INIT_PRIORITY)
33674 sprintf (buf, ".ctors.%.5u",
33675 /* Invert the numbering so the linker puts us in the proper
33676 order; constructors are run from right to left, and the
33677 linker sorts in increasing order. */
33678 MAX_INIT_PRIORITY - priority);
33679 section = buf;
33682 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33683 assemble_align (POINTER_SIZE);
33685 if (DEFAULT_ABI == ABI_V4
33686 && (TARGET_RELOCATABLE || flag_pic > 1))
33688 fputs ("\t.long (", asm_out_file);
33689 output_addr_const (asm_out_file, symbol);
33690 fputs (")@fixup\n", asm_out_file);
33692 else
33693 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
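/* For illustration: with MAX_INIT_PRIORITY of 65535, priority 101 maps
   to section .ctors.65434 and priority 200 to .ctors.65335; the linker
   sorts .65335 first, and since .ctors entries execute right to left,
   the priority-101 constructor still runs earlier, as required. */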
33696 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33697 static void
33698 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33700 const char *section = ".dtors";
33701 char buf[18];
33703 if (priority != DEFAULT_INIT_PRIORITY)
33705 sprintf (buf, ".dtors.%.5u",
33706 /* Invert the numbering so the linker puts us in the proper
33707 order; constructors are run from right to left, and the
33708 linker sorts in increasing order. */
33709 MAX_INIT_PRIORITY - priority);
33710 section = buf;
33713 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33714 assemble_align (POINTER_SIZE);
33716 if (DEFAULT_ABI == ABI_V4
33717 && (TARGET_RELOCATABLE || flag_pic > 1))
33719 fputs ("\t.long (", asm_out_file);
33720 output_addr_const (asm_out_file, symbol);
33721 fputs (")@fixup\n", asm_out_file);
33723 else
33724 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33727 void
33728 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33730 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33732 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33733 ASM_OUTPUT_LABEL (file, name);
33734 fputs (DOUBLE_INT_ASM_OP, file);
33735 rs6000_output_function_entry (file, name);
33736 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33737 if (DOT_SYMBOLS)
33739 fputs ("\t.size\t", file);
33740 assemble_name (file, name);
33741 fputs (",24\n\t.type\t.", file);
33742 assemble_name (file, name);
33743 fputs (",@function\n", file);
33744 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33746 fputs ("\t.globl\t.", file);
33747 assemble_name (file, name);
33748 putc ('\n', file);
33751 else
33752 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33753 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33754 rs6000_output_function_entry (file, name);
33755 fputs (":\n", file);
33756 return;
33759 int uses_toc;
33760 if (DEFAULT_ABI == ABI_V4
33761 && (TARGET_RELOCATABLE || flag_pic > 1)
33762 && !TARGET_SECURE_PLT
33763 && (!constant_pool_empty_p () || crtl->profile)
33764 && (uses_toc = uses_TOC ()))
33766 char buf[256];
33768 if (uses_toc == 2)
33769 switch_to_other_text_partition ();
33770 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33772 fprintf (file, "\t.long ");
33773 assemble_name (file, toc_label_name);
33774 need_toc_init = 1;
33775 putc ('-', file);
33776 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33777 assemble_name (file, buf);
33778 putc ('\n', file);
33779 if (uses_toc == 2)
33780 switch_to_other_text_partition ();
33783 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33784 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33786 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33788 char buf[256];
33790 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33792 fprintf (file, "\t.quad .TOC.-");
33793 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33794 assemble_name (file, buf);
33795 putc ('\n', file);
33798 if (DEFAULT_ABI == ABI_AIX)
33800 const char *desc_name, *orig_name;
33802 orig_name = (*targetm.strip_name_encoding) (name);
33803 desc_name = orig_name;
33804 while (*desc_name == '.')
33805 desc_name++;
33807 if (TREE_PUBLIC (decl))
33808 fprintf (file, "\t.globl %s\n", desc_name);
33810 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33811 fprintf (file, "%s:\n", desc_name);
33812 fprintf (file, "\t.long %s\n", orig_name);
33813 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33814 fputs ("\t.long 0\n", file);
33815 fprintf (file, "\t.previous\n");
33817 ASM_OUTPUT_LABEL (file, name);
33820 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33821 static void
33822 rs6000_elf_file_end (void)
33824 #ifdef HAVE_AS_GNU_ATTRIBUTE
33825 /* ??? The value emitted depends on options active at file end.
33826 Assume anyone using #pragma or attributes that might change
33827 options knows what they are doing. */
33828 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33829 && rs6000_passes_float)
33831 int fp;
33833 if (TARGET_DF_FPR)
33834 fp = 1;
33835 else if (TARGET_SF_FPR)
33836 fp = 3;
33837 else
33838 fp = 2;
33839 if (rs6000_passes_long_double)
33841 if (!TARGET_LONG_DOUBLE_128)
33842 fp |= 2 * 4;
33843 else if (TARGET_IEEEQUAD)
33844 fp |= 3 * 4;
33845 else
33846 fp |= 1 * 4;
33848 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33850 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33852 if (rs6000_passes_vector)
33853 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33854 (TARGET_ALTIVEC_ABI ? 2 : 1));
33855 if (rs6000_returns_struct)
33856 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33857 aix_struct_return ? 2 : 1);
33859 #endif
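/* For illustration, a worked example of the FP attribute above: a
   hard-float compile using double-precision FPRs that also passes IBM
   128-bit long doubles computes fp = 1, then fp |= 1 * 4, emitting

     .gnu_attribute 4, 5

   (tag 4 is Tag_GNU_Power_ABI_FP; the low two bits describe the scalar
   float support, the next two the long double format). */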
33860 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33861 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33862 file_end_indicate_exec_stack ();
33863 #endif
33865 if (flag_split_stack)
33866 file_end_indicate_split_stack ();
33868 if (cpu_builtin_p)
33870 /* We have expanded a CPU builtin, so we need to emit a reference to
33871 the special symbol that LIBC uses to declare it supports the
33872 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33873 switch_to_section (data_section);
33874 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33875 fprintf (asm_out_file, "\t%s %s\n",
33876 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33879 #endif
33881 #if TARGET_XCOFF
33883 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33884 #define HAVE_XCOFF_DWARF_EXTRAS 0
33885 #endif
33887 static enum unwind_info_type
33888 rs6000_xcoff_debug_unwind_info (void)
33890 return UI_NONE;
33893 static void
33894 rs6000_xcoff_asm_output_anchor (rtx symbol)
33896 char buffer[100];
33898 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33899 SYMBOL_REF_BLOCK_OFFSET (symbol));
33900 fprintf (asm_out_file, "%s", SET_ASM_OP);
33901 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33902 fprintf (asm_out_file, ",");
33903 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33904 fprintf (asm_out_file, "\n");
33907 static void
33908 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33910 fputs (GLOBAL_ASM_OP, stream);
33911 RS6000_OUTPUT_BASENAME (stream, name);
33912 putc ('\n', stream);
33915 /* A get_unnamed_section callback, used for read-only sections. DIRECTIVE
33916 points to the section string variable. */
33918 static void
33919 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33921 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33922 *(const char *const *) directive,
33923 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33926 /* Likewise for read-write sections. */
33928 static void
33929 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33931 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33932 *(const char *const *) directive,
33933 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33936 static void
33937 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33939 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33940 *(const char *const *) directive,
33941 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33944 /* A get_unnamed_section callback, used for switching to toc_section. */
33946 static void
33947 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33949 if (TARGET_MINIMAL_TOC)
33951 /* toc_section is always selected at least once from
33952 rs6000_xcoff_file_start, so this is guaranteed to
33953 always be defined once and only once in each file. */
33954 if (!toc_initialized)
33956 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33957 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33958 toc_initialized = 1;
33960 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33961 (TARGET_32BIT ? "" : ",3"));
33963 else
33964 fputs ("\t.toc\n", asm_out_file);
33967 /* Implement TARGET_ASM_INIT_SECTIONS. */
33969 static void
33970 rs6000_xcoff_asm_init_sections (void)
33972 read_only_data_section
33973 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33974 &xcoff_read_only_section_name);
33976 private_data_section
33977 = get_unnamed_section (SECTION_WRITE,
33978 rs6000_xcoff_output_readwrite_section_asm_op,
33979 &xcoff_private_data_section_name);
33981 tls_data_section
33982 = get_unnamed_section (SECTION_TLS,
33983 rs6000_xcoff_output_tls_section_asm_op,
33984 &xcoff_tls_data_section_name);
33986 tls_private_data_section
33987 = get_unnamed_section (SECTION_TLS,
33988 rs6000_xcoff_output_tls_section_asm_op,
33989 &xcoff_private_data_section_name);
33991 read_only_private_data_section
33992 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33993 &xcoff_private_data_section_name);
33995 toc_section
33996 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33998 readonly_data_section = read_only_data_section;
34001 static int
34002 rs6000_xcoff_reloc_rw_mask (void)
34004 return 3;
34007 static void
34008 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
34009 tree decl ATTRIBUTE_UNUSED)
34011 int smclass;
34012 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
34014 if (flags & SECTION_EXCLUDE)
34015 smclass = 4;
34016 else if (flags & SECTION_DEBUG)
34018 fprintf (asm_out_file, "\t.dwsect %s\n", name);
34019 return;
34021 else if (flags & SECTION_CODE)
34022 smclass = 0;
34023 else if (flags & SECTION_TLS)
34024 smclass = 3;
34025 else if (flags & SECTION_WRITE)
34026 smclass = 2;
34027 else
34028 smclass = 1;
34030 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
34031 (flags & SECTION_CODE) ? "." : "",
34032 name, suffix[smclass], flags & SECTION_ENTSIZE);
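/* For illustration: a code section "foo" (SECTION_CODE) comes out of
   the fprintf above as roughly

     .csect .foo[PR],2

   with PR/RO/RW/TL/XO picked by the flag tests and the trailing number
   being the log2 alignment stashed in the ENTSIZE bits (see
   rs6000_xcoff_section_type_flags below). */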
34035 #define IN_NAMED_SECTION(DECL) \
34036 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
34037 && DECL_SECTION_NAME (DECL) != NULL)
34039 static section *
34040 rs6000_xcoff_select_section (tree decl, int reloc,
34041 unsigned HOST_WIDE_INT align)
34043 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into a
34044 named section. */
34045 if (align > BIGGEST_ALIGNMENT)
34047 resolve_unique_section (decl, reloc, true);
34048 if (IN_NAMED_SECTION (decl))
34049 return get_named_section (decl, NULL, reloc);
34052 if (decl_readonly_section (decl, reloc))
34054 if (TREE_PUBLIC (decl))
34055 return read_only_data_section;
34056 else
34057 return read_only_private_data_section;
34059 else
34061 #if HAVE_AS_TLS
34062 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34064 if (TREE_PUBLIC (decl))
34065 return tls_data_section;
34066 else if (bss_initializer_p (decl))
34068 /* Convert to COMMON to emit in BSS. */
34069 DECL_COMMON (decl) = 1;
34070 return tls_comm_section;
34072 else
34073 return tls_private_data_section;
34075 else
34076 #endif
34077 if (TREE_PUBLIC (decl))
34078 return data_section;
34079 else
34080 return private_data_section;
34084 static void
34085 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
34087 const char *name;
34089 /* Use select_section for private data and uninitialized data with
34090 alignment <= BIGGEST_ALIGNMENT. */
34091 if (!TREE_PUBLIC (decl)
34092 || DECL_COMMON (decl)
34093 || (DECL_INITIAL (decl) == NULL_TREE
34094 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
34095 || DECL_INITIAL (decl) == error_mark_node
34096 || (flag_zero_initialized_in_bss
34097 && initializer_zerop (DECL_INITIAL (decl))))
34098 return;
34100 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
34101 name = (*targetm.strip_name_encoding) (name);
34102 set_decl_section_name (decl, name);
34105 /* Select section for constant in constant pool.
34107 On RS/6000, all constants are in the private read-only data area.
34108 However, if this is being placed in the TOC it must be output as a
34109 toc entry. */
34111 static section *
34112 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
34113 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
34115 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
34116 return toc_section;
34117 else
34118 return read_only_private_data_section;
34121 /* Remove any trailing [DS] or the like from the symbol name. */
34123 static const char *
34124 rs6000_xcoff_strip_name_encoding (const char *name)
34126 size_t len;
34127 if (*name == '*')
34128 name++;
34129 len = strlen (name);
34130 if (name[len - 1] == ']')
34131 return ggc_alloc_string (name, len - 4);
34132 else
34133 return name;
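/* For illustration: the stripping above assumes a trailing ']' always
   closes a four-character XCOFF mapping class, e.g.

     "foo[DS]"  -> "foo"
     "*bar[RW]" -> "bar"   (leading '*' skipped first)  */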
34136 /* Section attributes. AIX is always PIC. */
34138 static unsigned int
34139 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
34141 unsigned int align;
34142 unsigned int flags = default_section_type_flags (decl, name, reloc);
34144 /* Align to at least UNIT size. */
34145 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
34146 align = MIN_UNITS_PER_WORD;
34147 else
34148 /* Increase alignment of large objects if not already stricter. */
34149 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
34150 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
34151 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
34153 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
34156 /* Output at beginning of assembler file.
34158 Initialize the section names for the RS/6000 at this point.
34160 Specify filename, including full path, to assembler.
34162 We want to go into the TOC section so at least one .toc will be emitted.
34163 Also, in order to output proper .bs/.es pairs, we need at least one static
34164 [RW] section emitted.
34166 Finally, declare mcount when profiling to make the assembler happy. */
34168 static void
34169 rs6000_xcoff_file_start (void)
34171 rs6000_gen_section_name (&xcoff_bss_section_name,
34172 main_input_filename, ".bss_");
34173 rs6000_gen_section_name (&xcoff_private_data_section_name,
34174 main_input_filename, ".rw_");
34175 rs6000_gen_section_name (&xcoff_read_only_section_name,
34176 main_input_filename, ".ro_");
34177 rs6000_gen_section_name (&xcoff_tls_data_section_name,
34178 main_input_filename, ".tls_");
34179 rs6000_gen_section_name (&xcoff_tbss_section_name,
34180 main_input_filename, ".tbss_[UL]");
34182 fputs ("\t.file\t", asm_out_file);
34183 output_quoted_string (asm_out_file, main_input_filename);
34184 fputc ('\n', asm_out_file);
34185 if (write_symbols != NO_DEBUG)
34186 switch_to_section (private_data_section);
34187 switch_to_section (toc_section);
34188 switch_to_section (text_section);
34189 if (profile_flag)
34190 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
34191 rs6000_file_start ();
34194 /* Output at end of assembler file.
34195 On the RS/6000, referencing data should automatically pull in text. */
34197 static void
34198 rs6000_xcoff_file_end (void)
34200 switch_to_section (text_section);
34201 fputs ("_section_.text:\n", asm_out_file);
34202 switch_to_section (data_section);
34203 fputs (TARGET_32BIT
34204 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
34205 asm_out_file);
34208 struct declare_alias_data
34209 {
34210 FILE *file;
34211 bool function_descriptor;
34212 };
34214 /* Declare alias N. A helper function for for_node_and_aliases. */
34216 static bool
34217 rs6000_declare_alias (struct symtab_node *n, void *d)
34219 struct declare_alias_data *data = (struct declare_alias_data *)d;
34220 /* Main symbol is output specially, because varasm machinery does part of
34221 the job for us - we do not need to declare .globl/lglobs and such. */
34222 if (!n->alias || n->weakref)
34223 return false;
34225 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
34226 return false;
34228 /* Prevent assemble_alias from trying to use .set pseudo operation
34229 that does not behave as expected by the middle-end. */
34230 TREE_ASM_WRITTEN (n->decl) = true;
34232 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
34233 char *buffer = (char *) alloca (strlen (name) + 2);
34234 char *p;
34235 int dollar_inside = 0;
34237 strcpy (buffer, name);
34238 p = strchr (buffer, '$');
34239 while (p) {
34240 *p = '_';
34241 dollar_inside++;
34242 p = strchr (p + 1, '$');
34244 if (TREE_PUBLIC (n->decl))
34246 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
34248 if (dollar_inside) {
34249 if (data->function_descriptor)
34250 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34251 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34253 if (data->function_descriptor)
34255 fputs ("\t.globl .", data->file);
34256 RS6000_OUTPUT_BASENAME (data->file, buffer);
34257 putc ('\n', data->file);
34259 fputs ("\t.globl ", data->file);
34260 RS6000_OUTPUT_BASENAME (data->file, buffer);
34261 putc ('\n', data->file);
34263 #ifdef ASM_WEAKEN_DECL
34264 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
34265 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
34266 #endif
34268 else
34270 if (dollar_inside)
34272 if (data->function_descriptor)
34273 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34274 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34276 if (data->function_descriptor)
34278 fputs ("\t.lglobl .", data->file);
34279 RS6000_OUTPUT_BASENAME (data->file, buffer);
34280 putc ('\n', data->file);
34282 fputs ("\t.lglobl ", data->file);
34283 RS6000_OUTPUT_BASENAME (data->file, buffer);
34284 putc ('\n', data->file);
34286 if (data->function_descriptor)
34287 fputs (".", data->file);
34288 RS6000_OUTPUT_BASENAME (data->file, buffer);
34289 fputs (":\n", data->file);
34290 return false;
34294 #ifdef HAVE_GAS_HIDDEN
34295 /* Helper function to calculate visibility of a DECL
34296 and return the value as a const string. */
34298 static const char *
34299 rs6000_xcoff_visibility (tree decl)
34301 static const char * const visibility_types[] = {
34302 "", ",protected", ",hidden", ",internal"
34305 enum symbol_visibility vis = DECL_VISIBILITY (decl);
34307 if (TREE_CODE (decl) == FUNCTION_DECL
34308 && cgraph_node::get (decl)
34309 && cgraph_node::get (decl)->instrumentation_clone
34310 && cgraph_node::get (decl)->instrumented_version)
34311 vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);
34313 return visibility_types[vis];
34315 #endif
34318 /* This macro produces the initial definition of a function name.
34319 On the RS/6000, we need to place an extra '.' in the function name and
34320 output the function descriptor.
34321 Dollar signs are converted to underscores.
34323 The csect for the function will have already been created when
34324 text_section was selected. We do have to go back to that csect, however.
34326 The third and fourth parameters to the .function pseudo-op (16 and 044)
34327 are placeholders which no longer have any use.
34329 Because AIX assembler's .set command has unexpected semantics, we output
34330 all aliases as alternative labels in front of the definition. */
34332 void
34333 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
34335 char *buffer = (char *) alloca (strlen (name) + 1);
34336 char *p;
34337 int dollar_inside = 0;
34338 struct declare_alias_data data = {file, false};
34340 strcpy (buffer, name);
34341 p = strchr (buffer, '$');
34342 while (p) {
34343 *p = '_';
34344 dollar_inside++;
34345 p = strchr (p + 1, '$');
34347 if (TREE_PUBLIC (decl))
34349 if (!RS6000_WEAK || !DECL_WEAK (decl))
34351 if (dollar_inside) {
34352 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34353 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34355 fputs ("\t.globl .", file);
34356 RS6000_OUTPUT_BASENAME (file, buffer);
34357 #ifdef HAVE_GAS_HIDDEN
34358 fputs (rs6000_xcoff_visibility (decl), file);
34359 #endif
34360 putc ('\n', file);
34363 else
34365 if (dollar_inside) {
34366 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34367 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34369 fputs ("\t.lglobl .", file);
34370 RS6000_OUTPUT_BASENAME (file, buffer);
34371 putc ('\n', file);
34373 fputs ("\t.csect ", file);
34374 RS6000_OUTPUT_BASENAME (file, buffer);
34375 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34376 RS6000_OUTPUT_BASENAME (file, buffer);
34377 fputs (":\n", file);
34378 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34379 &data, true);
34380 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34381 RS6000_OUTPUT_BASENAME (file, buffer);
34382 fputs (", TOC[tc0], 0\n", file);
34383 in_section = NULL;
34384 switch_to_section (function_section (decl));
34385 putc ('.', file);
34386 RS6000_OUTPUT_BASENAME (file, buffer);
34387 fputs (":\n", file);
34388 data.function_descriptor = true;
34389 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34390 &data, true);
34391 if (!DECL_IGNORED_P (decl))
34393 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34394 xcoffout_declare_function (file, decl, buffer);
34395 else if (write_symbols == DWARF2_DEBUG)
34397 name = (*targetm.strip_name_encoding) (name);
34398 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34401 return;
34405 /* Output assembly language to globalize a symbol from a DECL,
34406 possibly with visibility. */
34408 void
34409 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34411 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34412 fputs (GLOBAL_ASM_OP, stream);
34413 RS6000_OUTPUT_BASENAME (stream, name);
34414 #ifdef HAVE_GAS_HIDDEN
34415 fputs (rs6000_xcoff_visibility (decl), stream);
34416 #endif
34417 putc ('\n', stream);
34420 /* Output assembly language to define a symbol as COMMON from a DECL,
34421 possibly with visibility. */
34423 void
34424 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34425 tree decl ATTRIBUTE_UNUSED,
34426 const char *name,
34427 unsigned HOST_WIDE_INT size,
34428 unsigned HOST_WIDE_INT align)
34430 unsigned HOST_WIDE_INT align2 = 2;
34432 if (align > 32)
34433 align2 = floor_log2 (align / BITS_PER_UNIT);
34434 else if (size > 4)
34435 align2 = 3;
34437 fputs (COMMON_ASM_OP, stream);
34438 RS6000_OUTPUT_BASENAME (stream, name);
34440 fprintf (stream,
34441 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34442 size, align2);
34444 #ifdef HAVE_GAS_HIDDEN
34445 if (decl != NULL)
34446 fputs (rs6000_xcoff_visibility (decl), stream);
34447 #endif
34448 putc ('\n', stream);
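/* For illustration, the alignment operand above is log2 of bytes with
   a size-dependent floor, e.g.

     size 4,  align 32 bits  -> .comm name,4,2
     size 8,  align 32 bits  -> .comm name,8,3   (doubleword floor)
     size 16, align 128 bits -> .comm name,16,4  (floor_log2 (128/8))  */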
34451 /* This macro produces the initial definition of an object (variable) name.
34452 Because AIX assembler's .set command has unexpected semantics, we output
34453 all aliases as alternative labels in front of the definition. */
34455 void
34456 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34458 struct declare_alias_data data = {file, false};
34459 RS6000_OUTPUT_BASENAME (file, name);
34460 fputs (":\n", file);
34461 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34462 &data, true);
34465 /* Override the default 'SYMBOL-.' syntax with AIX compatible 'SYMBOL-$'. */
34467 void
34468 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34470 fputs (integer_asm_op (size, FALSE), file);
34471 assemble_name (file, label);
34472 fputs ("-$", file);
34475 /* Output a symbol offset relative to the dbase for the current object.
34476 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34477 signed offsets.
34479 __gcc_unwind_dbase is embedded in all executables/libraries through
34480 libgcc/config/rs6000/crtdbase.S. */
34482 void
34483 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34485 fputs (integer_asm_op (size, FALSE), file);
34486 assemble_name (file, label);
34487 fputs("-__gcc_unwind_dbase", file);
34490 #ifdef HAVE_AS_TLS
34491 static void
34492 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34494 rtx symbol;
34495 int flags;
34496 const char *symname;
34498 default_encode_section_info (decl, rtl, first);
34500 /* Careful not to prod global register variables. */
34501 if (!MEM_P (rtl))
34502 return;
34503 symbol = XEXP (rtl, 0);
34504 if (GET_CODE (symbol) != SYMBOL_REF)
34505 return;
34507 flags = SYMBOL_REF_FLAGS (symbol);
34509 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34510 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34512 SYMBOL_REF_FLAGS (symbol) = flags;
34514 /* Append mapping class to extern decls. */
34515 symname = XSTR (symbol, 0);
34516 if (decl /* sync condition with assemble_external () */
34517 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34518 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34519 || TREE_CODE (decl) == FUNCTION_DECL)
34520 && symname[strlen (symname) - 1] != ']')
34522 char *newname = (char *) alloca (strlen (symname) + 5);
34523 strcpy (newname, symname);
34524 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34525 ? "[DS]" : "[UA]"));
34526 XSTR (symbol, 0) = ggc_strdup (newname);
34529 #endif /* HAVE_AS_TLS */
34530 #endif /* TARGET_XCOFF */
34532 void
34533 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34534 const char *name, const char *val)
34536 fputs ("\t.weak\t", stream);
34537 RS6000_OUTPUT_BASENAME (stream, name);
34538 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34539 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34541 if (TARGET_XCOFF)
34542 fputs ("[DS]", stream);
34543 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34544 if (TARGET_XCOFF)
34545 fputs (rs6000_xcoff_visibility (decl), stream);
34546 #endif
34547 fputs ("\n\t.weak\t.", stream);
34548 RS6000_OUTPUT_BASENAME (stream, name);
34550 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34551 if (TARGET_XCOFF)
34552 fputs (rs6000_xcoff_visibility (decl), stream);
34553 #endif
34554 fputc ('\n', stream);
34555 if (val)
34557 #ifdef ASM_OUTPUT_DEF
34558 ASM_OUTPUT_DEF (stream, name, val);
34559 #endif
34560 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34561 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34563 fputs ("\t.set\t.", stream);
34564 RS6000_OUTPUT_BASENAME (stream, name);
34565 fputs (",.", stream);
34566 RS6000_OUTPUT_BASENAME (stream, val);
34567 fputc ('\n', stream);
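/* For illustration (a sketch of the output shape): on AIX with dot
   symbols, weakening function FOO with definition BAR emits roughly

     .weak foo[DS]
     .weak .foo
     foo = bar            (however ASM_OUTPUT_DEF spells it)
     .set .foo,.bar

   covering both the function descriptor and the code entry point. */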
34573 /* Return true if INSN should not be copied. */
34575 static bool
34576 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34578 return recog_memoized (insn) >= 0
34579 && get_attr_cannot_copy (insn);
34582 /* Compute a (partial) cost for rtx X. Return true if the complete
34583 cost has been computed, and false if subexpressions should be
34584 scanned. In either case, *TOTAL contains the cost result. */
34586 static bool
34587 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34588 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34590 int code = GET_CODE (x);
34592 switch (code)
34594 /* On the RS/6000, if it is valid in the insn, it is free. */
34595 case CONST_INT:
34596 if (((outer_code == SET
34597 || outer_code == PLUS
34598 || outer_code == MINUS)
34599 && (satisfies_constraint_I (x)
34600 || satisfies_constraint_L (x)))
34601 || (outer_code == AND
34602 && (satisfies_constraint_K (x)
34603 || (mode == SImode
34604 ? satisfies_constraint_L (x)
34605 : satisfies_constraint_J (x))))
34606 || ((outer_code == IOR || outer_code == XOR)
34607 && (satisfies_constraint_K (x)
34608 || (mode == SImode
34609 ? satisfies_constraint_L (x)
34610 : satisfies_constraint_J (x))))
34611 || outer_code == ASHIFT
34612 || outer_code == ASHIFTRT
34613 || outer_code == LSHIFTRT
34614 || outer_code == ROTATE
34615 || outer_code == ROTATERT
34616 || outer_code == ZERO_EXTRACT
34617 || (outer_code == MULT
34618 && satisfies_constraint_I (x))
34619 || ((outer_code == DIV || outer_code == UDIV
34620 || outer_code == MOD || outer_code == UMOD)
34621 && exact_log2 (INTVAL (x)) >= 0)
34622 || (outer_code == COMPARE
34623 && (satisfies_constraint_I (x)
34624 || satisfies_constraint_K (x)))
34625 || ((outer_code == EQ || outer_code == NE)
34626 && (satisfies_constraint_I (x)
34627 || satisfies_constraint_K (x)
34628 || (mode == SImode
34629 ? satisfies_constraint_L (x)
34630 : satisfies_constraint_J (x))))
34631 || (outer_code == GTU
34632 && satisfies_constraint_I (x))
34633 || (outer_code == LTU
34634 && satisfies_constraint_P (x)))
34636 *total = 0;
34637 return true;
34639 else if ((outer_code == PLUS
34640 && reg_or_add_cint_operand (x, VOIDmode))
34641 || (outer_code == MINUS
34642 && reg_or_sub_cint_operand (x, VOIDmode))
34643 || ((outer_code == SET
34644 || outer_code == IOR
34645 || outer_code == XOR)
34646 && (INTVAL (x)
34647 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34649 *total = COSTS_N_INSNS (1);
34650 return true;
34652 /* FALLTHRU */
34654 case CONST_DOUBLE:
34655 case CONST_WIDE_INT:
34656 case CONST:
34657 case HIGH:
34658 case SYMBOL_REF:
34659 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34660 return true;
34662 case MEM:
34663 /* When optimizing for size, MEM should be slightly more expensive
34664 than generating address, e.g., (plus (reg) (const)).
34665 L1 cache latency is about two instructions. */
34666 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34667 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34668 *total += COSTS_N_INSNS (100);
34669 return true;
34671 case LABEL_REF:
34672 *total = 0;
34673 return true;
34675 case PLUS:
34676 case MINUS:
34677 if (FLOAT_MODE_P (mode))
34678 *total = rs6000_cost->fp;
34679 else
34680 *total = COSTS_N_INSNS (1);
34681 return false;
34683 case MULT:
34684 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34685 && satisfies_constraint_I (XEXP (x, 1)))
34687 if (INTVAL (XEXP (x, 1)) >= -256
34688 && INTVAL (XEXP (x, 1)) <= 255)
34689 *total = rs6000_cost->mulsi_const9;
34690 else
34691 *total = rs6000_cost->mulsi_const;
34693 else if (mode == SFmode)
34694 *total = rs6000_cost->fp;
34695 else if (FLOAT_MODE_P (mode))
34696 *total = rs6000_cost->dmul;
34697 else if (mode == DImode)
34698 *total = rs6000_cost->muldi;
34699 else
34700 *total = rs6000_cost->mulsi;
34701 return false;
34703 case FMA:
34704 if (mode == SFmode)
34705 *total = rs6000_cost->fp;
34706 else
34707 *total = rs6000_cost->dmul;
34708 break;
34710 case DIV:
34711 case MOD:
34712 if (FLOAT_MODE_P (mode))
34714 *total = mode == DFmode ? rs6000_cost->ddiv
34715 : rs6000_cost->sdiv;
34716 return false;
34718 /* FALLTHRU */
34720 case UDIV:
34721 case UMOD:
34722 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34723 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34725 if (code == DIV || code == MOD)
34726 /* Shift, addze */
34727 *total = COSTS_N_INSNS (2);
34728 else
34729 /* Shift */
34730 *total = COSTS_N_INSNS (1);
34732 else
34734 if (GET_MODE (XEXP (x, 1)) == DImode)
34735 *total = rs6000_cost->divdi;
34736 else
34737 *total = rs6000_cost->divsi;
34739 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34740 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34741 *total += COSTS_N_INSNS (2);
34742 return false;
34744 case CTZ:
34745 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34746 return false;
34748 case FFS:
34749 *total = COSTS_N_INSNS (4);
34750 return false;
34752 case POPCOUNT:
34753 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34754 return false;
34756 case PARITY:
34757 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34758 return false;
34760 case NOT:
34761 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34762 *total = 0;
34763 else
34764 *total = COSTS_N_INSNS (1);
34765 return false;
34767 case AND:
34768 if (CONST_INT_P (XEXP (x, 1)))
34770 rtx left = XEXP (x, 0);
34771 rtx_code left_code = GET_CODE (left);
34773 /* rotate-and-mask: 1 insn. */
34774 if ((left_code == ROTATE
34775 || left_code == ASHIFT
34776 || left_code == LSHIFTRT)
34777 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34779 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34780 if (!CONST_INT_P (XEXP (left, 1)))
34781 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34782 *total += COSTS_N_INSNS (1);
34783 return true;
34786 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34787 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34788 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34789 || (val & 0xffff) == val
34790 || (val & 0xffff0000) == val
34791 || ((val & 0xffff) == 0 && mode == SImode))
34793 *total = rtx_cost (left, mode, AND, 0, speed);
34794 *total += COSTS_N_INSNS (1);
34795 return true;
34798 /* 2 insns. */
34799 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34801 *total = rtx_cost (left, mode, AND, 0, speed);
34802 *total += COSTS_N_INSNS (2);
34803 return true;
34807 *total = COSTS_N_INSNS (1);
34808 return false;
34810 case IOR:
34811 /* FIXME */
34812 *total = COSTS_N_INSNS (1);
34813 return true;
34815 case CLZ:
34816 case XOR:
34817 case ZERO_EXTRACT:
34818 *total = COSTS_N_INSNS (1);
34819 return false;
34821 case ASHIFT:
34822 /* The EXTSWSLI instruction combines a sign extend and a shift; don't
34823 count the sign extend and the shift separately within the insn. */
34824 if (TARGET_EXTSWSLI && mode == DImode
34825 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34826 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34828 *total = 0;
34829 return false;
34831 /* fall through */
34833 case ASHIFTRT:
34834 case LSHIFTRT:
34835 case ROTATE:
34836 case ROTATERT:
34837 /* Handle mul_highpart. */
34838 if (outer_code == TRUNCATE
34839 && GET_CODE (XEXP (x, 0)) == MULT)
34841 if (mode == DImode)
34842 *total = rs6000_cost->muldi;
34843 else
34844 *total = rs6000_cost->mulsi;
34845 return true;
34847 else if (outer_code == AND)
34848 *total = 0;
34849 else
34850 *total = COSTS_N_INSNS (1);
34851 return false;
34853 case SIGN_EXTEND:
34854 case ZERO_EXTEND:
34855 if (GET_CODE (XEXP (x, 0)) == MEM)
34856 *total = 0;
34857 else
34858 *total = COSTS_N_INSNS (1);
34859 return false;
34861 case COMPARE:
34862 case NEG:
34863 case ABS:
34864 if (!FLOAT_MODE_P (mode))
34866 *total = COSTS_N_INSNS (1);
34867 return false;
34869 /* FALLTHRU */
34871 case FLOAT:
34872 case UNSIGNED_FLOAT:
34873 case FIX:
34874 case UNSIGNED_FIX:
34875 case FLOAT_TRUNCATE:
34876 *total = rs6000_cost->fp;
34877 return false;
34879 case FLOAT_EXTEND:
34880 if (mode == DFmode)
34881 *total = rs6000_cost->sfdf_convert;
34882 else
34883 *total = rs6000_cost->fp;
34884 return false;
34886 case UNSPEC:
34887 switch (XINT (x, 1))
34889 case UNSPEC_FRSP:
34890 *total = rs6000_cost->fp;
34891 return true;
34893 default:
34894 break;
34896 break;
34898 case CALL:
34899 case IF_THEN_ELSE:
34900 if (!speed)
34902 *total = COSTS_N_INSNS (1);
34903 return true;
34905 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34907 *total = rs6000_cost->fp;
34908 return false;
34910 break;
34912 case NE:
34913 case EQ:
34914 case GTU:
34915 case LTU:
34916 /* Carry bit requires mode == Pmode.
34917 The NEG or PLUS is already counted, so add only one. */
34918 if (mode == Pmode
34919 && (outer_code == NEG || outer_code == PLUS))
34921 *total = COSTS_N_INSNS (1);
34922 return true;
34924 if (outer_code == SET)
34926 if (XEXP (x, 1) == const0_rtx)
34928 if (TARGET_ISEL && !TARGET_MFCRF)
34929 *total = COSTS_N_INSNS (8);
34930 else
34931 *total = COSTS_N_INSNS (2);
34932 return true;
34934 else
34936 *total = COSTS_N_INSNS (3);
34937 return false;
34940 /* FALLTHRU */
34942 case GT:
34943 case LT:
34944 case UNORDERED:
34945 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
34947 if (TARGET_ISEL && !TARGET_MFCRF)
34948 *total = COSTS_N_INSNS (8);
34949 else
34950 *total = COSTS_N_INSNS (2);
34951 return true;
34953 /* CC COMPARE. */
34954 if (outer_code == COMPARE)
34956 *total = 0;
34957 return true;
34959 break;
34961 default:
34962 break;
34965 return false;
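
/* Illustration only, not GCC code: a minimal sketch of the contract
   described above.  The hook fills in *TOTAL and returns true when the
   cost is complete; when it returns false, the caller adds in the costs
   of the sub-expressions itself.  The toy_* names are hypothetical
   stand-ins for rtx and the rtx_costs hook.  */

struct toy_rtx { int is_mult; struct toy_rtx *op0, *op1; };

static int
toy_rtx_costs (struct toy_rtx *x, int *total)
{
  if (x->is_mult)
    {
      *total = 12;	/* complete cost: stop the walk */
      return 1;
    }
  *total = 4;		/* partial cost: operands still to be scanned */
  return 0;
}

static int
toy_full_cost (struct toy_rtx *x)
{
  int total;
  if (toy_rtx_costs (x, &total))
    return total;
  if (x->op0)
    total += toy_full_cost (x->op0);
  if (x->op1)
    total += toy_full_cost (x->op1);
  return total;
}
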
34968 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34970 static bool
34971 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34972 int opno, int *total, bool speed)
34974 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34976 fprintf (stderr,
34977 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34978 "opno = %d, total = %d, speed = %s, x:\n",
34979 ret ? "complete" : "scan inner",
34980 GET_MODE_NAME (mode),
34981 GET_RTX_NAME (outer_code),
34982 opno,
34983 *total,
34984 speed ? "true" : "false");
34986 debug_rtx (x);
34988 return ret;
34991 static int
34992 rs6000_insn_cost (rtx_insn *insn, bool speed)
34994 if (recog_memoized (insn) < 0)
34995 return 0;
34997 if (!speed)
34998 return get_attr_length (insn);
35000 int cost = get_attr_cost (insn);
35001 if (cost > 0)
35002 return cost;
35004 int n = get_attr_length (insn) / 4;
35005 enum attr_type type = get_attr_type (insn);
35007 switch (type)
35009 case TYPE_LOAD:
35010 case TYPE_FPLOAD:
35011 case TYPE_VECLOAD:
35012 cost = COSTS_N_INSNS (n + 1);
35013 break;
35015 case TYPE_MUL:
35016 switch (get_attr_size (insn))
35018 case SIZE_8:
35019 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
35020 break;
35021 case SIZE_16:
35022 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
35023 break;
35024 case SIZE_32:
35025 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
35026 break;
35027 case SIZE_64:
35028 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
35029 break;
35030 default:
35031 gcc_unreachable ();
35033 break;
35034 case TYPE_DIV:
35035 switch (get_attr_size (insn))
35037 case SIZE_32:
35038 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
35039 break;
35040 case SIZE_64:
35041 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
35042 break;
35043 default:
35044 gcc_unreachable ();
35046 break;
35048 case TYPE_FP:
35049 cost = n * rs6000_cost->fp;
35050 break;
35051 case TYPE_DMUL:
35052 cost = n * rs6000_cost->dmul;
35053 break;
35054 case TYPE_SDIV:
35055 cost = n * rs6000_cost->sdiv;
35056 break;
35057 case TYPE_DDIV:
35058 cost = n * rs6000_cost->ddiv;
35059 break;
35061 case TYPE_SYNC:
35062 case TYPE_LOAD_L:
35063 case TYPE_MFCR:
35064 case TYPE_MFCRF:
35065 cost = COSTS_N_INSNS (n + 2);
35066 break;
35068 default:
35069 cost = COSTS_N_INSNS (n);
35072 return cost;
35075 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
35077 static int
35078 rs6000_debug_address_cost (rtx x, machine_mode mode,
35079 addr_space_t as, bool speed)
35081 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
35083 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
35084 ret, speed ? "true" : "false");
35085 debug_rtx (x);
35087 return ret;
35091 /* A C expression returning the cost of moving data from a register of class
35092 FROM to one of class TO. */
35094 static int
35095 rs6000_register_move_cost (machine_mode mode,
35096 reg_class_t from, reg_class_t to)
35098 int ret;
35100 if (TARGET_DEBUG_COST)
35101 dbg_cost_ctrl++;
35103 /* Moves from/to GENERAL_REGS. */
35104 if (reg_classes_intersect_p (to, GENERAL_REGS)
35105 || reg_classes_intersect_p (from, GENERAL_REGS))
35107 reg_class_t rclass = from;
35109 if (! reg_classes_intersect_p (to, GENERAL_REGS))
35110 rclass = to;
35112 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
35113 ret = (rs6000_memory_move_cost (mode, rclass, false)
35114 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
35116 /* It's more expensive to move CR_REGS than CR0_REGS because of the
35117 shift. */
35118 else if (rclass == CR_REGS)
35119 ret = 4;
35121 /* For those processors that have slow LR/CTR moves, make them more
35122 expensive than memory in order to bias spills to memory. */
35123 else if ((rs6000_cpu == PROCESSOR_POWER6
35124 || rs6000_cpu == PROCESSOR_POWER7
35125 || rs6000_cpu == PROCESSOR_POWER8
35126 || rs6000_cpu == PROCESSOR_POWER9)
35127 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
35128 ret = 6 * hard_regno_nregs (0, mode);
35130 else
35131 /* A move will cost one instruction per GPR moved. */
35132 ret = 2 * hard_regno_nregs (0, mode);
35135 /* If we have VSX, we can easily move between FPR or Altivec registers. */
35136 else if (VECTOR_MEM_VSX_P (mode)
35137 && reg_classes_intersect_p (to, VSX_REGS)
35138 && reg_classes_intersect_p (from, VSX_REGS))
35139 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
35141 /* Moving between two similar registers is just one instruction. */
35142 else if (reg_classes_intersect_p (to, from))
35143 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
35145 /* Everything else has to go through GENERAL_REGS. */
35146 else
35147 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
35148 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
35150 if (TARGET_DEBUG_COST)
35152 if (dbg_cost_ctrl == 1)
35153 fprintf (stderr,
35154 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
35155 ret, GET_MODE_NAME (mode), reg_class_names[from],
35156 reg_class_names[to]);
35157 dbg_cost_ctrl--;
35160 return ret;
35163 /* A C expression returning the cost of moving data of MODE from a register to
35164 or from memory. */
35166 static int
35167 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
35168 bool in ATTRIBUTE_UNUSED)
35170 int ret;
35172 if (TARGET_DEBUG_COST)
35173 dbg_cost_ctrl++;
35175 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
35176 ret = 4 * hard_regno_nregs (0, mode);
35177 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
35178 || reg_classes_intersect_p (rclass, VSX_REGS)))
35179 ret = 4 * hard_regno_nregs (32, mode);
35180 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
35181 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
35182 else
35183 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
35185 if (TARGET_DEBUG_COST)
35187 if (dbg_cost_ctrl == 1)
35188 fprintf (stderr,
35189 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
35190 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
35191 dbg_cost_ctrl--;
35194 return ret;
35197 /* Returns a code for a target-specific builtin that implements
35198 reciprocal of the function, or NULL_TREE if not available. */
35200 static tree
35201 rs6000_builtin_reciprocal (tree fndecl)
35203 switch (DECL_FUNCTION_CODE (fndecl))
35205 case VSX_BUILTIN_XVSQRTDP:
35206 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
35207 return NULL_TREE;
35209 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
35211 case VSX_BUILTIN_XVSQRTSP:
35212 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
35213 return NULL_TREE;
35215 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
35217 default:
35218 return NULL_TREE;
35222 /* Load up a constant. If the mode is a vector mode, splat the value across
35223 all of the vector elements. */
35225 static rtx
35226 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
35228 rtx reg;
35230 if (mode == SFmode || mode == DFmode)
35232 rtx d = const_double_from_real_value (dconst, mode);
35233 reg = force_reg (mode, d);
35235 else if (mode == V4SFmode)
35237 rtx d = const_double_from_real_value (dconst, SFmode);
35238 rtvec v = gen_rtvec (4, d, d, d, d);
35239 reg = gen_reg_rtx (mode);
35240 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35242 else if (mode == V2DFmode)
35244 rtx d = const_double_from_real_value (dconst, DFmode);
35245 rtvec v = gen_rtvec (2, d, d);
35246 reg = gen_reg_rtx (mode);
35247 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35249 else
35250 gcc_unreachable ();
35252 return reg;
35255 /* Generate an FMA instruction. */
35257 static void
35258 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
35260 machine_mode mode = GET_MODE (target);
35261 rtx dst;
35263 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
35264 gcc_assert (dst != NULL);
35266 if (dst != target)
35267 emit_move_insn (target, dst);
35270 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
35272 static void
35273 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
35275 machine_mode mode = GET_MODE (dst);
35276 rtx r;
35278 /* This is a tad more complicated, since the fnma_optab is for
35279 a different expression: fma(-m1, m2, a), which is the same
35280 thing except in the case of signed zeros.
35282 Fortunately we know that if FMA is supported, then FNMSUB is
35283 also supported in the ISA. Just expand it directly. */
35285 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
35287 r = gen_rtx_NEG (mode, a);
35288 r = gen_rtx_FMA (mode, m1, m2, r);
35289 r = gen_rtx_NEG (mode, r);
35290 emit_insn (gen_rtx_SET (dst, r));
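
/* Host-side illustration, not GCC code, of the signed-zero point made
   above, using the C99 fma from <math.h>: when m1 * m2 == a exactly,
   fma (-m1, m2, a) rounds to +0.0 while -fma (m1, m2, -a) yields -0.0.  */

#include <math.h>
#include <stdio.h>

int
main (void)
{
  double m1 = 1.0, m2 = 1.0, a = 1.0;
  double fnma = fma (-m1, m2, a);	/* -1*1 + 1 = +0.0 */
  double fnmsub = -fma (m1, m2, -a);	/* -(1*1 - 1) = -0.0 */
  printf ("fnma = %g (signbit %d), fnmsub = %g (signbit %d)\n",
	  fnma, signbit (fnma) != 0, fnmsub, signbit (fnmsub) != 0);
  return 0;
}
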
35293 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
35294 add a reg_note saying that this was a division. Support both scalar and
35295 vector divide. Assumes no trapping math and finite arguments. */
35297 void
35298 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
35300 machine_mode mode = GET_MODE (dst);
35301 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
35302 int i;
35304 /* Low precision estimates guarantee 5 bits of accuracy. High
35305 precision estimates guarantee 14 bits of accuracy. SFmode
35306 requires 23 bits of accuracy. DFmode requires 52 bits of
35307 accuracy. Each pass at least doubles the number of accurate bits, leading
35308 to the following. */
35309 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35310 if (mode == DFmode || mode == V2DFmode)
35311 passes++;
35313 enum insn_code code = optab_handler (smul_optab, mode);
35314 insn_gen_fn gen_mul = GEN_FCN (code);
35316 gcc_assert (code != CODE_FOR_nothing);
35318 one = rs6000_load_constant_and_splat (mode, dconst1);
35320 /* x0 = 1./d estimate */
35321 x0 = gen_reg_rtx (mode);
35322 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
35323 UNSPEC_FRES)));
35325 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
35326 if (passes > 1) {
35328 /* e0 = 1. - d * x0 */
35329 e0 = gen_reg_rtx (mode);
35330 rs6000_emit_nmsub (e0, d, x0, one);
35332 /* x1 = x0 + e0 * x0 */
35333 x1 = gen_reg_rtx (mode);
35334 rs6000_emit_madd (x1, e0, x0, x0);
35336 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
35337 ++i, xprev = xnext, eprev = enext) {
35339 /* enext = eprev * eprev */
35340 enext = gen_reg_rtx (mode);
35341 emit_insn (gen_mul (enext, eprev, eprev));
35343 /* xnext = xprev + enext * xprev */
35344 xnext = gen_reg_rtx (mode);
35345 rs6000_emit_madd (xnext, enext, xprev, xprev);
35348 } else
35349 xprev = x0;
35351 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
35353 /* u = n * xprev */
35354 u = gen_reg_rtx (mode);
35355 emit_insn (gen_mul (u, n, xprev));
35357 /* v = n - (d * u) */
35358 v = gen_reg_rtx (mode);
35359 rs6000_emit_nmsub (v, d, u, n);
35361 /* dst = (v * xprev) + u */
35362 rs6000_emit_madd (dst, v, xprev, u);
35364 if (note_p)
35365 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
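
/* Host-side sketch, not GCC code, of the refinement sequence emitted
   above (assumes PASSES >= 2).  The initial estimate is deliberately
   perturbed to mimic the roughly 5-bit-accurate fres result; each pass
   squares the relative error, and the last pass folds in the numerator.
   toy_swdiv (1.0, 3.0, 4) converges to full double precision.  */

static double
toy_swdiv (double n, double d, int passes)
{
  double x = (1.0 / d) * 1.03;	/* crude estimate, about 5 bits */
  double e = 1.0 - d * x;	/* e0 = 1. - d * x0 */
  x = x + e * x;		/* x1 = x0 + e0 * x0 */
  for (int i = 0; i < passes - 2; i++)
    {
      e = e * e;		/* enext = eprev * eprev */
      x = x + e * x;		/* xnext = xprev + enext * xprev */
    }
  double u = n * x;		/* u = n * xprev */
  double v = n - d * u;		/* v = n - d * u */
  return v * x + u;		/* dst = (v * xprev) + u */
}
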
35368 /* Goldschmidt's Algorithm for single/double-precision floating point
35369 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
35371 void
35372 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
35374 machine_mode mode = GET_MODE (src);
35375 rtx e = gen_reg_rtx (mode);
35376 rtx g = gen_reg_rtx (mode);
35377 rtx h = gen_reg_rtx (mode);
35379 /* Low precision estimates guarantee 5 bits of accuracy. High
35380 precision estimates guarantee 14 bits of accuracy. SFmode
35381 requires 23 bits of accuracy. DFmode requires 52 bits of
35382 accuracy. Each pass at least doubles the number of accurate bits, leading
35383 to the following. */
35384 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35385 if (mode == DFmode || mode == V2DFmode)
35386 passes++;
35388 int i;
35389 rtx mhalf;
35390 enum insn_code code = optab_handler (smul_optab, mode);
35391 insn_gen_fn gen_mul = GEN_FCN (code);
35393 gcc_assert (code != CODE_FOR_nothing);
35395 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35397 /* e = rsqrt estimate */
35398 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35399 UNSPEC_RSQRT)));
35401 /* If src == 0.0, filter the infinite rsqrt estimate to prevent NaN for sqrt(0.0). */
35402 if (!recip)
35404 rtx zero = force_reg (mode, CONST0_RTX (mode));
35406 if (mode == SFmode)
35408 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35409 e, zero, mode, 0);
35410 if (target != e)
35411 emit_move_insn (e, target);
35413 else
35415 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35416 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35420 /* g = sqrt estimate. */
35421 emit_insn (gen_mul (g, e, src));
35422 /* h = 1/(2*sqrt) estimate. */
35423 emit_insn (gen_mul (h, e, mhalf));
35425 if (recip)
35427 if (passes == 1)
35429 rtx t = gen_reg_rtx (mode);
35430 rs6000_emit_nmsub (t, g, h, mhalf);
35431 /* Apply correction directly to 1/rsqrt estimate. */
35432 rs6000_emit_madd (dst, e, t, e);
35434 else
35436 for (i = 0; i < passes; i++)
35438 rtx t1 = gen_reg_rtx (mode);
35439 rtx g1 = gen_reg_rtx (mode);
35440 rtx h1 = gen_reg_rtx (mode);
35442 rs6000_emit_nmsub (t1, g, h, mhalf);
35443 rs6000_emit_madd (g1, g, t1, g);
35444 rs6000_emit_madd (h1, h, t1, h);
35446 g = g1;
35447 h = h1;
35449 /* Multiply by 2 for 1/rsqrt. */
35450 emit_insn (gen_add3_insn (dst, h, h));
35453 else
35455 rtx t = gen_reg_rtx (mode);
35456 rs6000_emit_nmsub (t, g, h, mhalf);
35457 rs6000_emit_madd (dst, g, t, g);
35460 return;
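
/* Host-side sketch, not GCC code, of the Goldschmidt iteration emitted
   above for the sqrt case (assumes src > 0): from a reciprocal square
   root estimate e, maintain g ~ sqrt(src) and h ~ 1/(2*sqrt(src)) and
   refine both with the shared correction t = 1/2 - g*h.  */

#include <math.h>

static double
toy_swsqrt (double src, int passes)
{
  double e = (1.0 / sqrt (src)) * 1.03;	/* crude rsqrt estimate */
  double g = e * src;			/* g = sqrt estimate */
  double h = e * 0.5;			/* h = 1/(2*sqrt) estimate */
  for (int i = 0; i < passes; i++)
    {
      double t = 0.5 - g * h;		/* t = -(g*h - 1/2) */
      g = g + g * t;			/* g1 = g + g*t */
      h = h + h * t;			/* h1 = h + h*t */
    }
  return g;				/* sqrt(src); use h + h for rsqrt */
}
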
35463 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35464 (Power7) targets. DST is the target, and SRC is the argument operand. */
35466 void
35467 rs6000_emit_popcount (rtx dst, rtx src)
35469 machine_mode mode = GET_MODE (dst);
35470 rtx tmp1, tmp2;
35472 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35473 if (TARGET_POPCNTD)
35475 if (mode == SImode)
35476 emit_insn (gen_popcntdsi2 (dst, src));
35477 else
35478 emit_insn (gen_popcntddi2 (dst, src));
35479 return;
35482 tmp1 = gen_reg_rtx (mode);
35484 if (mode == SImode)
35486 emit_insn (gen_popcntbsi2 (tmp1, src));
35487 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35488 NULL_RTX, 0);
35489 tmp2 = force_reg (SImode, tmp2);
35490 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35492 else
35494 emit_insn (gen_popcntbdi2 (tmp1, src));
35495 tmp2 = expand_mult (DImode, tmp1,
35496 GEN_INT ((HOST_WIDE_INT)
35497 0x01010101 << 32 | 0x01010101),
35498 NULL_RTX, 0);
35499 tmp2 = force_reg (DImode, tmp2);
35500 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
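
/* Host-side sketch, not GCC code, of the multiply trick used above:
   after a per-byte population count (popcntb), multiplying by
   0x01010101 accumulates the sum of all four byte counts into the most
   significant byte, which the final shift extracts.  */

#include <stdint.h>

static unsigned int
toy_popcount32 (uint32_t x)
{
  /* Per-byte popcount, standing in for popcntb.  */
  x = x - ((x >> 1) & 0x55555555u);
  x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);
  x = (x + (x >> 4)) & 0x0f0f0f0fu;	/* each byte now holds 0..8 */
  return (x * 0x01010101u) >> 24;	/* byte-sum lands in the top byte */
}
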
35505 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35506 target, and SRC is the argument operand. */
35508 void
35509 rs6000_emit_parity (rtx dst, rtx src)
35511 machine_mode mode = GET_MODE (dst);
35512 rtx tmp;
35514 tmp = gen_reg_rtx (mode);
35516 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35517 if (TARGET_CMPB)
35519 if (mode == SImode)
35521 emit_insn (gen_popcntbsi2 (tmp, src));
35522 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35524 else
35526 emit_insn (gen_popcntbdi2 (tmp, src));
35527 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35529 return;
35532 if (mode == SImode)
35534 /* Is mult+shift >= shift+xor+shift+xor? */
35535 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35537 rtx tmp1, tmp2, tmp3, tmp4;
35539 tmp1 = gen_reg_rtx (SImode);
35540 emit_insn (gen_popcntbsi2 (tmp1, src));
35542 tmp2 = gen_reg_rtx (SImode);
35543 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35544 tmp3 = gen_reg_rtx (SImode);
35545 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35547 tmp4 = gen_reg_rtx (SImode);
35548 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35549 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35551 else
35552 rs6000_emit_popcount (tmp, src);
35553 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35555 else
35557 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35558 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35560 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35562 tmp1 = gen_reg_rtx (DImode);
35563 emit_insn (gen_popcntbdi2 (tmp1, src));
35565 tmp2 = gen_reg_rtx (DImode);
35566 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35567 tmp3 = gen_reg_rtx (DImode);
35568 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35570 tmp4 = gen_reg_rtx (DImode);
35571 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35572 tmp5 = gen_reg_rtx (DImode);
35573 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35575 tmp6 = gen_reg_rtx (DImode);
35576 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35577 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35579 else
35580 rs6000_emit_popcount (tmp, src);
35581 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
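
/* Host-side sketch, not GCC code, of the shift+xor fallback above.  */

#include <stdint.h>

static unsigned int
toy_parity32 (uint32_t x)
{
  /* Per-byte popcount, standing in for popcntb.  */
  x = x - ((x >> 1) & 0x55555555u);
  x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);
  x = (x + (x >> 4)) & 0x0f0f0f0fu;
  /* Fold the byte counts together with xors; bit 0 of the fold is the
     xor of the counts' low bits, i.e. the parity of the whole word.  */
  x ^= x >> 16;
  x ^= x >> 8;
  return x & 1;
}
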
35585 /* Expand an Altivec constant permutation for little endian mode.
35586 There are two issues: First, the two input operands must be
35587 swapped so that together they form a double-wide array in LE
35588 order. Second, the vperm instruction has surprising behavior
35589 in LE mode: it interprets the elements of the source vectors
35590 in BE mode ("left to right") and interprets the elements of
35591 the destination vector in LE mode ("right to left"). To
35592 correct for this, we must subtract each element of the permute
35593 control vector from 31.
35595 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35596 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35597 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35598 serve as the permute control vector. Then, in BE mode,
35600 vperm 9,10,11,12
35602 places the desired result in vr9. However, in LE mode the
35603 vector contents will be
35605 vr10 = 00000003 00000002 00000001 00000000
35606 vr11 = 00000007 00000006 00000005 00000004
35608 The result of the vperm using the same permute control vector is
35610 vr9 = 05000000 07000000 01000000 03000000
35612 That is, the leftmost 4 bytes of vr10 are interpreted as the
35613 source for the rightmost 4 bytes of vr9, and so on.
35615 If we change the permute control vector to
35617 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35619 and issue
35621 vperm 9,11,10,12
35623 we get the desired
35625 vr9 = 00000006 00000004 00000002 00000000. */
35627 void
35628 altivec_expand_vec_perm_const_le (rtx operands[4])
35630 unsigned int i;
35631 rtx perm[16];
35632 rtx constv, unspec;
35633 rtx target = operands[0];
35634 rtx op0 = operands[1];
35635 rtx op1 = operands[2];
35636 rtx sel = operands[3];
35638 /* Unpack and adjust the constant selector. */
35639 for (i = 0; i < 16; ++i)
35641 rtx e = XVECEXP (sel, 0, i);
35642 unsigned int elt = 31 - (INTVAL (e) & 31);
35643 perm[i] = GEN_INT (elt);
35646 /* Expand to a permute, swapping the inputs and using the
35647 adjusted selector. */
35648 if (!REG_P (op0))
35649 op0 = force_reg (V16QImode, op0);
35650 if (!REG_P (op1))
35651 op1 = force_reg (V16QImode, op1);
35653 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35654 constv = force_reg (V16QImode, constv);
35655 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35656 UNSPEC_VPERM);
35657 if (!REG_P (target))
35659 rtx tmp = gen_reg_rtx (V16QImode);
35660 emit_move_insn (tmp, unspec);
35661 unspec = tmp;
35664 emit_move_insn (target, unspec);
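
/* Host-side sketch, not GCC code, of the adjustment described above:
   given a BE-numbered permute control, swap the two input vectors and
   replace each selector byte by 31 minus its value.  Applied to
   {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} this produces the
   {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4} vector shown in the
   comment above.  */

static void
toy_adjust_vperm_sel_le (unsigned char sel[16])
{
  for (int i = 0; i < 16; i++)
    sel[i] = 31 - (sel[i] & 31);
}
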
35667 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35668 permute control vector. But here it's not a constant, so we must
35669 generate a vector NAND or NOR to do the adjustment. */
35671 void
35672 altivec_expand_vec_perm_le (rtx operands[4])
35674 rtx notx, iorx, unspec;
35675 rtx target = operands[0];
35676 rtx op0 = operands[1];
35677 rtx op1 = operands[2];
35678 rtx sel = operands[3];
35679 rtx tmp = target;
35680 rtx norreg = gen_reg_rtx (V16QImode);
35681 machine_mode mode = GET_MODE (target);
35683 /* Get everything in regs so the pattern matches. */
35684 if (!REG_P (op0))
35685 op0 = force_reg (mode, op0);
35686 if (!REG_P (op1))
35687 op1 = force_reg (mode, op1);
35688 if (!REG_P (sel))
35689 sel = force_reg (V16QImode, sel);
35690 if (!REG_P (target))
35691 tmp = gen_reg_rtx (mode);
35693 if (TARGET_P9_VECTOR)
35695 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
35696 UNSPEC_VPERMR);
35698 else
35700 /* Invert the selector with a VNAND if available, else a VNOR.
35701 The VNAND is preferred for future fusion opportunities. */
35702 notx = gen_rtx_NOT (V16QImode, sel);
35703 iorx = (TARGET_P8_VECTOR
35704 ? gen_rtx_IOR (V16QImode, notx, notx)
35705 : gen_rtx_AND (V16QImode, notx, notx));
35706 emit_insn (gen_rtx_SET (norreg, iorx));
35708 /* Permute with operands reversed and adjusted selector. */
35709 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35710 UNSPEC_VPERM);
35713 /* Copy into target, possibly by way of a register. */
35714 if (!REG_P (target))
35716 emit_move_insn (tmp, unspec);
35717 unspec = tmp;
35720 emit_move_insn (target, unspec);
35723 /* Expand an Altivec constant permutation. Return true if we match
35724 an efficient implementation; false to fall back to VPERM. */
35726 bool
35727 altivec_expand_vec_perm_const (rtx operands[4])
35729 struct altivec_perm_insn {
35730 HOST_WIDE_INT mask;
35731 enum insn_code impl;
35732 unsigned char perm[16];
35734 static const struct altivec_perm_insn patterns[] = {
35735 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35736 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35737 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35738 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35739 { OPTION_MASK_ALTIVEC,
35740 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35741 : CODE_FOR_altivec_vmrglb_direct),
35742 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35743 { OPTION_MASK_ALTIVEC,
35744 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35745 : CODE_FOR_altivec_vmrglh_direct),
35746 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35747 { OPTION_MASK_ALTIVEC,
35748 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35749 : CODE_FOR_altivec_vmrglw_direct),
35750 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35751 { OPTION_MASK_ALTIVEC,
35752 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35753 : CODE_FOR_altivec_vmrghb_direct),
35754 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35755 { OPTION_MASK_ALTIVEC,
35756 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35757 : CODE_FOR_altivec_vmrghh_direct),
35758 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35759 { OPTION_MASK_ALTIVEC,
35760 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35761 : CODE_FOR_altivec_vmrghw_direct),
35762 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35763 { OPTION_MASK_P8_VECTOR,
35764 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35765 : CODE_FOR_p8_vmrgow_v4sf_direct),
35766 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35767 { OPTION_MASK_P8_VECTOR,
35768 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35769 : CODE_FOR_p8_vmrgew_v4sf_direct),
35770 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35773 unsigned int i, j, elt, which;
35774 unsigned char perm[16];
35775 rtx target, op0, op1, sel, x;
35776 bool one_vec;
35778 target = operands[0];
35779 op0 = operands[1];
35780 op1 = operands[2];
35781 sel = operands[3];
35783 /* Unpack the constant selector. */
35784 for (i = which = 0; i < 16; ++i)
35786 rtx e = XVECEXP (sel, 0, i);
35787 elt = INTVAL (e) & 31;
35788 which |= (elt < 16 ? 1 : 2);
35789 perm[i] = elt;
35792 /* Simplify the constant selector based on operands. */
35793 switch (which)
35795 default:
35796 gcc_unreachable ();
35798 case 3:
35799 one_vec = false;
35800 if (!rtx_equal_p (op0, op1))
35801 break;
35802 /* FALLTHRU */
35804 case 2:
35805 for (i = 0; i < 16; ++i)
35806 perm[i] &= 15;
35807 op0 = op1;
35808 one_vec = true;
35809 break;
35811 case 1:
35812 op1 = op0;
35813 one_vec = true;
35814 break;
35817 /* Look for splat patterns. */
35818 if (one_vec)
35820 elt = perm[0];
35822 for (i = 0; i < 16; ++i)
35823 if (perm[i] != elt)
35824 break;
35825 if (i == 16)
35827 if (!BYTES_BIG_ENDIAN)
35828 elt = 15 - elt;
35829 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35830 return true;
35833 if (elt % 2 == 0)
35835 for (i = 0; i < 16; i += 2)
35836 if (perm[i] != elt || perm[i + 1] != elt + 1)
35837 break;
35838 if (i == 16)
35840 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35841 x = gen_reg_rtx (V8HImode);
35842 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35843 GEN_INT (field)));
35844 emit_move_insn (target, gen_lowpart (V16QImode, x));
35845 return true;
35849 if (elt % 4 == 0)
35851 for (i = 0; i < 16; i += 4)
35852 if (perm[i] != elt
35853 || perm[i + 1] != elt + 1
35854 || perm[i + 2] != elt + 2
35855 || perm[i + 3] != elt + 3)
35856 break;
35857 if (i == 16)
35859 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35860 x = gen_reg_rtx (V4SImode);
35861 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35862 GEN_INT (field)));
35863 emit_move_insn (target, gen_lowpart (V16QImode, x));
35864 return true;
35869 /* Look for merge and pack patterns. */
35870 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35872 bool swapped;
35874 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35875 continue;
35877 elt = patterns[j].perm[0];
35878 if (perm[0] == elt)
35879 swapped = false;
35880 else if (perm[0] == elt + 16)
35881 swapped = true;
35882 else
35883 continue;
35884 for (i = 1; i < 16; ++i)
35886 elt = patterns[j].perm[i];
35887 if (swapped)
35888 elt = (elt >= 16 ? elt - 16 : elt + 16);
35889 else if (one_vec && elt >= 16)
35890 elt -= 16;
35891 if (perm[i] != elt)
35892 break;
35894 if (i == 16)
35896 enum insn_code icode = patterns[j].impl;
35897 machine_mode omode = insn_data[icode].operand[0].mode;
35898 machine_mode imode = insn_data[icode].operand[1].mode;
35900 /* For little-endian, don't use vpkuwum and vpkuhum if the
35901 underlying vector type is not V4SI and V8HI, respectively.
35902 For example, using vpkuwum with a V8HI picks up the even
35903 halfwords (BE numbering) when the even halfwords (LE
35904 numbering) are what we need. */
35905 if (!BYTES_BIG_ENDIAN
35906 && icode == CODE_FOR_altivec_vpkuwum_direct
35907 && ((GET_CODE (op0) == REG
35908 && GET_MODE (op0) != V4SImode)
35909 || (GET_CODE (op0) == SUBREG
35910 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35911 continue;
35912 if (!BYTES_BIG_ENDIAN
35913 && icode == CODE_FOR_altivec_vpkuhum_direct
35914 && ((GET_CODE (op0) == REG
35915 && GET_MODE (op0) != V8HImode)
35916 || (GET_CODE (op0) == SUBREG
35917 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35918 continue;
35920 /* For little-endian, the two input operands must be swapped
35921 (or swapped back) to ensure proper right-to-left numbering
35922 from 0 to 2N-1. */
35923 if (swapped ^ !BYTES_BIG_ENDIAN)
35924 std::swap (op0, op1);
35925 if (imode != V16QImode)
35927 op0 = gen_lowpart (imode, op0);
35928 op1 = gen_lowpart (imode, op1);
35930 if (omode == V16QImode)
35931 x = target;
35932 else
35933 x = gen_reg_rtx (omode);
35934 emit_insn (GEN_FCN (icode) (x, op0, op1));
35935 if (omode != V16QImode)
35936 emit_move_insn (target, gen_lowpart (V16QImode, x));
35937 return true;
35941 if (!BYTES_BIG_ENDIAN)
35943 altivec_expand_vec_perm_const_le (operands);
35944 return true;
35947 return false;
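
/* Host-side sketch, not GCC code, of the splat test performed above: a
   selector drawn from a single vector that repeats one byte index can
   be expanded as a vspltb rather than a full vperm.  */

static int
toy_is_byte_splat (const unsigned char perm[16], unsigned int *elt)
{
  *elt = perm[0];
  for (int i = 1; i < 16; i++)
    if (perm[i] != *elt)
      return 0;
  return 1;
}
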
35950 /* Expand a Paired Single or VSX Permute Doubleword constant permutation.
35951 Return true if we match an efficient implementation. */
35953 static bool
35954 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35955 unsigned char perm0, unsigned char perm1)
35957 rtx x;
35959 /* If both selectors come from the same operand, fold to single op. */
35960 if ((perm0 & 2) == (perm1 & 2))
35962 if (perm0 & 2)
35963 op0 = op1;
35964 else
35965 op1 = op0;
35967 /* If both operands are equal, fold to simpler permutation. */
35968 if (rtx_equal_p (op0, op1))
35970 perm0 = perm0 & 1;
35971 perm1 = (perm1 & 1) + 2;
35973 /* If the first selector comes from the second operand, swap. */
35974 else if (perm0 & 2)
35976 if (perm1 & 2)
35977 return false;
35978 perm0 -= 2;
35979 perm1 += 2;
35980 std::swap (op0, op1);
35982 /* If the second selector does not come from the second operand, fail. */
35983 else if ((perm1 & 2) == 0)
35984 return false;
35986 /* Success! */
35987 if (target != NULL)
35989 machine_mode vmode, dmode;
35990 rtvec v;
35992 vmode = GET_MODE (target);
35993 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35994 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35995 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35996 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35997 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35998 emit_insn (gen_rtx_SET (target, x));
36000 return true;
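
/* Host-side sketch, not GCC code, of the selector canonicalization
   above, ignoring the operand-identity subtleties the real code also
   checks.  For example, (perm0, perm1) = (3, 1) is canonicalized to
   (1, 3) with the operands swapped: element 0 then comes from the new
   first operand and element 1 from the new second operand.  */

static void
toy_canon_perm2 (unsigned int *perm0, unsigned int *perm1, int *swap_ops)
{
  *swap_ops = 0;
  if ((*perm0 & 2) == (*perm1 & 2))
    {
      /* Both elements select from one operand: renumber so that
	 element 0 is 0..1 and element 1 is 2..3 (the caller also
	 duplicates that operand).  */
      *perm0 &= 1;
      *perm1 = (*perm1 & 1) + 2;
    }
  else if (*perm0 & 2)
    {
      /* Element 0 selects from the second operand: swap operands.  */
      *perm0 -= 2;
      *perm1 += 2;
      *swap_ops = 1;
    }
}
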
36003 bool
36004 rs6000_expand_vec_perm_const (rtx operands[4])
36006 rtx target, op0, op1, sel;
36007 unsigned char perm0, perm1;
36009 target = operands[0];
36010 op0 = operands[1];
36011 op1 = operands[2];
36012 sel = operands[3];
36014 /* Unpack the constant selector. */
36015 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
36016 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
36018 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
36021 /* Test whether a constant permutation is supported. */
36023 static bool
36024 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode, vec_perm_indices sel)
36026 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
36027 if (TARGET_ALTIVEC)
36028 return true;
36030 /* Check for ps_merge* or evmerge* insns. */
36031 if (TARGET_PAIRED_FLOAT && vmode == V2SFmode)
36033 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
36034 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
36035 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
36038 return false;
36041 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
36043 static void
36044 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
36045 machine_mode vmode, unsigned nelt, rtx perm[])
36047 machine_mode imode;
36048 rtx x;
36050 imode = vmode;
36051 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
36052 imode = mode_for_int_vector (vmode).require ();
36054 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
36055 x = expand_vec_perm (vmode, op0, op1, x, target);
36056 if (x != target)
36057 emit_move_insn (target, x);
36060 /* Expand an extract even operation. */
36062 void
36063 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
36065 machine_mode vmode = GET_MODE (target);
36066 unsigned i, nelt = GET_MODE_NUNITS (vmode);
36067 rtx perm[16];
36069 for (i = 0; i < nelt; i++)
36070 perm[i] = GEN_INT (i * 2);
36072 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
36075 /* Expand a vector interleave operation. */
36077 void
36078 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
36080 machine_mode vmode = GET_MODE (target);
36081 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
36082 rtx perm[16];
36084 high = (highp ? 0 : nelt / 2);
36085 for (i = 0; i < nelt / 2; i++)
36087 perm[i * 2] = GEN_INT (i + high);
36088 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
36091 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
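
/* Host-side sketch, not GCC code, of the selector built above: with
   nelt == 4 it is {0, 4, 1, 5} when HIGHP and {2, 6, 3, 7} otherwise,
   i.e. the first or second halves of the two inputs (in BE element
   order) interleaved.  */

static void
toy_interleave_sel (unsigned int nelt, int highp, unsigned int *sel)
{
  unsigned int high = highp ? 0 : nelt / 2;
  for (unsigned int i = 0; i < nelt / 2; i++)
    {
      sel[i * 2] = i + high;
      sel[i * 2 + 1] = i + nelt + high;
    }
}
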
36094 /* Scale a V2DF vector SRC by two raised to the power SCALE, placing the result in TGT. */
36095 void
36096 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
36098 HOST_WIDE_INT hwi_scale (scale);
36099 REAL_VALUE_TYPE r_pow;
36100 rtvec v = rtvec_alloc (2);
36101 rtx elt;
36102 rtx scale_vec = gen_reg_rtx (V2DFmode);
36103 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
36104 elt = const_double_from_real_value (r_pow, DFmode);
36105 RTVEC_ELT (v, 0) = elt;
36106 RTVEC_ELT (v, 1) = elt;
36107 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
36108 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
36111 /* Return an RTX representing where to find the function value of a
36112 function returning MODE. */
36113 static rtx
36114 rs6000_complex_function_value (machine_mode mode)
36116 unsigned int regno;
36117 rtx r1, r2;
36118 machine_mode inner = GET_MODE_INNER (mode);
36119 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
36121 if (TARGET_FLOAT128_TYPE
36122 && (mode == KCmode
36123 || (mode == TCmode && TARGET_IEEEQUAD)))
36124 regno = ALTIVEC_ARG_RETURN;
36126 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36127 regno = FP_ARG_RETURN;
36129 else
36131 regno = GP_ARG_RETURN;
36133 /* 32-bit is OK since it'll go in r3/r4. */
36134 if (TARGET_32BIT && inner_bytes >= 4)
36135 return gen_rtx_REG (mode, regno);
36138 if (inner_bytes >= 8)
36139 return gen_rtx_REG (mode, regno);
36141 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
36142 const0_rtx);
36143 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
36144 GEN_INT (inner_bytes));
36145 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
36148 /* Return an rtx describing a return value of MODE as a PARALLEL
36149 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
36150 stride REG_STRIDE. */
36152 static rtx
36153 rs6000_parallel_return (machine_mode mode,
36154 int n_elts, machine_mode elt_mode,
36155 unsigned int regno, unsigned int reg_stride)
36157 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
36159 int i;
36160 for (i = 0; i < n_elts; i++)
36162 rtx r = gen_rtx_REG (elt_mode, regno);
36163 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
36164 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
36165 regno += reg_stride;
36168 return par;
36171 /* Target hook for TARGET_FUNCTION_VALUE.
36173 An integer value is in r3 and a floating-point value is in fp1,
36174 unless -msoft-float. */
36176 static rtx
36177 rs6000_function_value (const_tree valtype,
36178 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
36179 bool outgoing ATTRIBUTE_UNUSED)
36181 machine_mode mode;
36182 unsigned int regno;
36183 machine_mode elt_mode;
36184 int n_elts;
36186 /* Special handling for structs in darwin64. */
36187 if (TARGET_MACHO
36188 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
36190 CUMULATIVE_ARGS valcum;
36191 rtx valret;
36193 valcum.words = 0;
36194 valcum.fregno = FP_ARG_MIN_REG;
36195 valcum.vregno = ALTIVEC_ARG_MIN_REG;
36196 /* Do a trial code generation as if this were going to be passed as
36197 an argument; if any part goes in memory, we return NULL. */
36198 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
36199 if (valret)
36200 return valret;
36201 /* Otherwise fall through to standard ABI rules. */
36204 mode = TYPE_MODE (valtype);
36206 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
36207 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
36209 int first_reg, n_regs;
36211 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
36213 /* _Decimal128 must use even/odd register pairs. */
36214 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36215 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
36217 else
36219 first_reg = ALTIVEC_ARG_RETURN;
36220 n_regs = 1;
36223 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
36226 /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI. */
36227 if (TARGET_32BIT && TARGET_POWERPC64)
36228 switch (mode)
36230 default:
36231 break;
36232 case E_DImode:
36233 case E_SCmode:
36234 case E_DCmode:
36235 case E_TCmode:
36236 int count = GET_MODE_SIZE (mode) / 4;
36237 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
36240 if ((INTEGRAL_TYPE_P (valtype)
36241 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
36242 || POINTER_TYPE_P (valtype))
36243 mode = TARGET_32BIT ? SImode : DImode;
36245 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36246 /* _Decimal128 must use an even/odd register pair. */
36247 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36248 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
36249 && !FLOAT128_VECTOR_P (mode)
36250 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
36251 regno = FP_ARG_RETURN;
36252 else if (TREE_CODE (valtype) == COMPLEX_TYPE
36253 && targetm.calls.split_complex_arg)
36254 return rs6000_complex_function_value (mode);
36255 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36256 return register is used in both cases, and we won't see V2DImode/V2DFmode
36257 for pure altivec, combine the two cases. */
36258 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
36259 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
36260 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
36261 regno = ALTIVEC_ARG_RETURN;
36262 else
36263 regno = GP_ARG_RETURN;
36265 return gen_rtx_REG (mode, regno);
36268 /* Define how to find the value returned by a library function
36269 assuming the value has mode MODE. */
36270 rtx
36271 rs6000_libcall_value (machine_mode mode)
36273 unsigned int regno;
36275 /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI. */
36276 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
36277 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
36279 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36280 /* _Decimal128 must use an even/odd register pair. */
36281 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36282 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
36283 && TARGET_HARD_FLOAT
36284 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
36285 regno = FP_ARG_RETURN;
36286 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36287 return register is used in both cases, and we won't see V2DImode/V2DFmode
36288 for pure altivec, combine the two cases. */
36289 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
36290 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
36291 regno = ALTIVEC_ARG_RETURN;
36292 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
36293 return rs6000_complex_function_value (mode);
36294 else
36295 regno = GP_ARG_RETURN;
36297 return gen_rtx_REG (mode, regno);
36300 /* Compute register pressure classes. We implement the target hook to avoid
36301 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
36302 lead to incorrect estimates of the number of available registers and therefore
36303 increased register pressure/spill. */
36304 static int
36305 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
36307 int n;
36309 n = 0;
36310 pressure_classes[n++] = GENERAL_REGS;
36311 if (TARGET_VSX)
36312 pressure_classes[n++] = VSX_REGS;
36313 else
36315 if (TARGET_ALTIVEC)
36316 pressure_classes[n++] = ALTIVEC_REGS;
36317 if (TARGET_HARD_FLOAT)
36318 pressure_classes[n++] = FLOAT_REGS;
36320 pressure_classes[n++] = CR_REGS;
36321 pressure_classes[n++] = SPECIAL_REGS;
36323 return n;
36326 /* Given FROM and TO register numbers, say whether this elimination is allowed.
36327 Frame pointer elimination is automatically handled.
36329 For the RS/6000, if frame pointer elimination is being done, we would like
36330 to convert ap into fp, not sp.
36332 We need r30 if -mminimal-toc was specified and there are constant pool
36333 references. */
36335 static bool
36336 rs6000_can_eliminate (const int from, const int to)
36338 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
36339 ? ! frame_pointer_needed
36340 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
36341 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
36342 || constant_pool_empty_p ()
36343 : true);
36346 /* Define the offset between two registers, FROM to be eliminated and its
36347 replacement TO, at the start of a routine. */
36348 HOST_WIDE_INT
36349 rs6000_initial_elimination_offset (int from, int to)
36351 rs6000_stack_t *info = rs6000_stack_info ();
36352 HOST_WIDE_INT offset;
36354 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36355 offset = info->push_p ? 0 : -info->total_size;
36356 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36358 offset = info->push_p ? 0 : -info->total_size;
36359 if (FRAME_GROWS_DOWNWARD)
36360 offset += info->fixed_size + info->vars_size + info->parm_size;
36362 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36363 offset = FRAME_GROWS_DOWNWARD
36364 ? info->fixed_size + info->vars_size + info->parm_size
36365 : 0;
36366 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36367 offset = info->total_size;
36368 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36369 offset = info->push_p ? info->total_size : 0;
36370 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
36371 offset = 0;
36372 else
36373 gcc_unreachable ();
36375 return offset;
36378 /* Fill in the sizes of the registers used by the unwinder. */
36380 static void
36381 rs6000_init_dwarf_reg_sizes_extra (tree address)
36383 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36385 int i;
36386 machine_mode mode = TYPE_MODE (char_type_node);
36387 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36388 rtx mem = gen_rtx_MEM (BLKmode, addr);
36389 rtx value = gen_int_mode (16, mode);
36391 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36392 The unwinder still needs to know the size of Altivec registers. */
36394 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36396 int column = DWARF_REG_TO_UNWIND_COLUMN
36397 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36398 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36400 emit_move_insn (adjust_address (mem, mode, offset), value);
36405 /* Map internal gcc register numbers to debug format register numbers.
36406 FORMAT specifies the type of debug register number to use:
36407 0 -- debug information, except for frame-related sections
36408 1 -- DWARF .debug_frame section
36409 2 -- DWARF .eh_frame section */
36411 unsigned int
36412 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36414 /* Except for the above, we use the internal number for non-DWARF
36415 debug information, and also for .eh_frame. */
36416 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
36417 return regno;
36419 /* On some platforms, we use the standard DWARF register
36420 numbering for .debug_info and .debug_frame. */
36421 #ifdef RS6000_USE_DWARF_NUMBERING
36422 if (regno <= 63)
36423 return regno;
36424 if (regno == LR_REGNO)
36425 return 108;
36426 if (regno == CTR_REGNO)
36427 return 109;
36428 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36429 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36430 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36431 to the DWARF reg for CR. */
36432 if (format == 1 && regno == CR2_REGNO)
36433 return 64;
36434 if (CR_REGNO_P (regno))
36435 return regno - CR0_REGNO + 86;
36436 if (regno == CA_REGNO)
36437 return 101; /* XER */
36438 if (ALTIVEC_REGNO_P (regno))
36439 return regno - FIRST_ALTIVEC_REGNO + 1124;
36440 if (regno == VRSAVE_REGNO)
36441 return 356;
36442 if (regno == VSCR_REGNO)
36443 return 67;
36444 #endif
36445 return regno;
36448 /* Target hook for eh_return_filter_mode. */
36449 static scalar_int_mode
36450 rs6000_eh_return_filter_mode (void)
36452 return TARGET_32BIT ? SImode : word_mode;
36455 /* Target hook for scalar_mode_supported_p. */
36456 static bool
36457 rs6000_scalar_mode_supported_p (scalar_mode mode)
36459 /* -m32 does not support TImode. This is the default, from
36460 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36461 same ABI as for -m32. But default_scalar_mode_supported_p allows
36462 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36463 for -mpowerpc64. */
36464 if (TARGET_32BIT && mode == TImode)
36465 return false;
36467 if (DECIMAL_FLOAT_MODE_P (mode))
36468 return default_decimal_float_supported_p ();
36469 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36470 return true;
36471 else
36472 return default_scalar_mode_supported_p (mode);
36475 /* Target hook for vector_mode_supported_p. */
36476 static bool
36477 rs6000_vector_mode_supported_p (machine_mode mode)
36480 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
36481 return true;
36483 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36484 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36485 double-double. */
36486 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36487 return true;
36489 else
36490 return false;
36493 /* Target hook for floatn_mode. */
36494 static opt_scalar_float_mode
36495 rs6000_floatn_mode (int n, bool extended)
36497 if (extended)
36499 switch (n)
36501 case 32:
36502 return DFmode;
36504 case 64:
36505 if (TARGET_FLOAT128_TYPE)
36506 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36507 else
36508 return opt_scalar_float_mode ();
36510 case 128:
36511 return opt_scalar_float_mode ();
36513 default:
36514 /* Those are the only valid _FloatNx types. */
36515 gcc_unreachable ();
36518 else
36520 switch (n)
36522 case 32:
36523 return SFmode;
36525 case 64:
36526 return DFmode;
36528 case 128:
36529 if (TARGET_FLOAT128_TYPE)
36530 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36531 else
36532 return opt_scalar_float_mode ();
36534 default:
36535 return opt_scalar_float_mode ();
36541 /* Target hook for c_mode_for_suffix. */
36542 static machine_mode
36543 rs6000_c_mode_for_suffix (char suffix)
36545 if (TARGET_FLOAT128_TYPE)
36547 if (suffix == 'q' || suffix == 'Q')
36548 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36550 /* At the moment, we are not defining a suffix for IBM extended double.
36551 If/when the default for -mabi=ieeelongdouble is changed, and we want
36552 to support __ibm128 constants in legacy library code, we may need to
36553 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36554 'q' as machine-dependent suffixes. The x86_64 port uses 'w' for
36555 __float80 constants. */
36558 return VOIDmode;
36561 /* Target hook for invalid_arg_for_unprototyped_fn. */
36562 static const char *
36563 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36565 return (!rs6000_darwin64_abi
36566 && typelist == 0
36567 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36568 && (funcdecl == NULL_TREE
36569 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36570 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36571 ? N_("AltiVec argument passed to unprototyped function")
36572 : NULL;
36575 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36576 setup by using __stack_chk_fail_local hidden function instead of
36577 calling __stack_chk_fail directly. Otherwise it is better to call
36578 __stack_chk_fail directly. */
36580 static tree ATTRIBUTE_UNUSED
36581 rs6000_stack_protect_fail (void)
36583 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36584 ? default_hidden_stack_protect_fail ()
36585 : default_external_stack_protect_fail ();
36588 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36590 #if TARGET_ELF
36591 static unsigned HOST_WIDE_INT
36592 rs6000_asan_shadow_offset (void)
36594 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36596 #endif
36598 /* Mask options that we want to support inside attribute((target)) and
36599 #pragma GCC target operations. Note, we do not include things like
36600 64/32-bit, endianness, hard/soft floating point, etc. that would have
36601 different calling sequences. */
36603 struct rs6000_opt_mask {
36604 const char *name; /* option name */
36605 HOST_WIDE_INT mask; /* mask to set */
36606 bool invert; /* invert sense of mask */
36607 bool valid_target; /* option is a target option */
36610 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36612 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36613 { "cmpb", OPTION_MASK_CMPB, false, true },
36614 { "crypto", OPTION_MASK_CRYPTO, false, true },
36615 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36616 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36617 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36618 false, true },
36619 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36620 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36621 { "fprnd", OPTION_MASK_FPRND, false, true },
36622 { "hard-dfp", OPTION_MASK_DFP, false, true },
36623 { "htm", OPTION_MASK_HTM, false, true },
36624 { "isel", OPTION_MASK_ISEL, false, true },
36625 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36626 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36627 { "modulo", OPTION_MASK_MODULO, false, true },
36628 { "mulhw", OPTION_MASK_MULHW, false, true },
36629 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36630 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36631 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36632 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36633 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36634 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36635 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
36636 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36637 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36638 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36639 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36640 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36641 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36642 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36643 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36644 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36645 { "string", OPTION_MASK_STRING, false, true },
36646 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
36647 { "update", OPTION_MASK_NO_UPDATE, true , true },
36648 { "vsx", OPTION_MASK_VSX, false, true },
36649 #ifdef OPTION_MASK_64BIT
36650 #if TARGET_AIX_OS
36651 { "aix64", OPTION_MASK_64BIT, false, false },
36652 { "aix32", OPTION_MASK_64BIT, true, false },
36653 #else
36654 { "64", OPTION_MASK_64BIT, false, false },
36655 { "32", OPTION_MASK_64BIT, true, false },
36656 #endif
36657 #endif
36658 #ifdef OPTION_MASK_EABI
36659 { "eabi", OPTION_MASK_EABI, false, false },
36660 #endif
36661 #ifdef OPTION_MASK_LITTLE_ENDIAN
36662 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36663 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36664 #endif
36665 #ifdef OPTION_MASK_RELOCATABLE
36666 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36667 #endif
36668 #ifdef OPTION_MASK_STRICT_ALIGN
36669 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36670 #endif
36671 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36672 { "string", OPTION_MASK_STRING, false, false },
36675 /* Builtin mask mapping for printing the flags. */
36676 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36678 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36679 { "vsx", RS6000_BTM_VSX, false, false },
36680 { "paired", RS6000_BTM_PAIRED, false, false },
36681 { "fre", RS6000_BTM_FRE, false, false },
36682 { "fres", RS6000_BTM_FRES, false, false },
36683 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36684 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36685 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36686 { "cell", RS6000_BTM_CELL, false, false },
36687 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36688 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36689 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36690 { "crypto", RS6000_BTM_CRYPTO, false, false },
36691 { "htm", RS6000_BTM_HTM, false, false },
36692 { "hard-dfp", RS6000_BTM_DFP, false, false },
36693 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36694 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36695 { "float128", RS6000_BTM_FLOAT128, false, false },
36696 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36699 /* Option variables that we want to support inside attribute((target)) and
36700 #pragma GCC target operations. */
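/* Illustrative usage sketch (not part of the original source): entries in
   rs6000_opt_vars are plain boolean option variables, so they take the same
   "name"/"no-name" spelling as the mask options above.  */
#if 0
__attribute__((__target__("longcall,no-friz")))
extern double example_optvar_fn (double);
#endif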
36702 struct rs6000_opt_var {
36703 const char *name; /* option name */
36704 size_t global_offset; /* offset of the option in global_options. */
36705 size_t target_offset; /* offset of the option in target options. */
36708 static struct rs6000_opt_var const rs6000_opt_vars[] =
36710 { "friz",
36711 offsetof (struct gcc_options, x_TARGET_FRIZ),
36712 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36713 { "avoid-indexed-addresses",
36714 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36715 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36716 { "paired",
36717 offsetof (struct gcc_options, x_rs6000_paired_float),
36718 offsetof (struct cl_target_option, x_rs6000_paired_float), },
36719 { "longcall",
36720 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36721 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36722 { "optimize-swaps",
36723 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36724 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36725 { "allow-movmisalign",
36726 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36727 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36728 { "sched-groups",
36729 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36730 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36731 { "always-hint",
36732 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36733 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36734 { "align-branch-targets",
36735 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36736 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36737 { "tls-markers",
36738 offsetof (struct gcc_options, x_tls_markers),
36739 offsetof (struct cl_target_option, x_tls_markers), },
36740 { "sched-prolog",
36741 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36742 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36743 { "sched-epilog",
36744 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36745 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36748 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36749 parsing. Return true if there were no errors. */
36751 static bool
36752 rs6000_inner_target_options (tree args, bool attr_p)
36754 bool ret = true;
36756 if (args == NULL_TREE)
36759 else if (TREE_CODE (args) == STRING_CST)
36761 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36762 char *q;
36764 while ((q = strtok (p, ",")) != NULL)
36766 bool error_p = false;
36767 bool not_valid_p = false;
36768 const char *cpu_opt = NULL;
36770 p = NULL;
36771 if (strncmp (q, "cpu=", 4) == 0)
36773 int cpu_index = rs6000_cpu_name_lookup (q+4);
36774 if (cpu_index >= 0)
36775 rs6000_cpu_index = cpu_index;
36776 else
36778 error_p = true;
36779 cpu_opt = q+4;
36782 else if (strncmp (q, "tune=", 5) == 0)
36784 int tune_index = rs6000_cpu_name_lookup (q+5);
36785 if (tune_index >= 0)
36786 rs6000_tune_index = tune_index;
36787 else
36789 error_p = true;
36790 cpu_opt = q+5;
36793 else
36795 size_t i;
36796 bool invert = false;
36797 char *r = q;
36799 error_p = true;
36800 if (strncmp (r, "no-", 3) == 0)
36802 invert = true;
36803 r += 3;
36806 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36807 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36809 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36811 if (!rs6000_opt_masks[i].valid_target)
36812 not_valid_p = true;
36813 else
36815 error_p = false;
36816 rs6000_isa_flags_explicit |= mask;
36818 /* VSX needs altivec, so -mvsx automagically sets
36819 altivec and disables -mavoid-indexed-addresses. */
36820 if (!invert)
36822 if (mask == OPTION_MASK_VSX)
36824 mask |= OPTION_MASK_ALTIVEC;
36825 TARGET_AVOID_XFORM = 0;
36829 if (rs6000_opt_masks[i].invert)
36830 invert = !invert;
36832 if (invert)
36833 rs6000_isa_flags &= ~mask;
36834 else
36835 rs6000_isa_flags |= mask;
36837 break;
36840 if (error_p && !not_valid_p)
36842 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36843 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36845 size_t j = rs6000_opt_vars[i].global_offset;
36846 *((int *) ((char *)&global_options + j)) = !invert;
36847 error_p = false;
36848 not_valid_p = false;
36849 break;
36854 if (error_p)
36856 const char *eprefix, *esuffix;
36858 ret = false;
36859 if (attr_p)
36861 eprefix = "__attribute__((__target__(";
36862 esuffix = ")))";
36864 else
36866 eprefix = "#pragma GCC target ";
36867 esuffix = "";
36870 if (cpu_opt)
36871 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36872 q, esuffix);
36873 else if (not_valid_p)
36874 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36875 else
36876 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36881 else if (TREE_CODE (args) == TREE_LIST)
36885 tree value = TREE_VALUE (args);
36886 if (value)
36888 bool ret2 = rs6000_inner_target_options (value, attr_p);
36889 if (!ret2)
36890 ret = false;
36892 args = TREE_CHAIN (args);
36894 while (args != NULL_TREE);
36897 else
36899 error ("attribute %<target%> argument not a string");
36900 return false;
36903 return ret;
36906 /* Print out the target options as a list for -mdebug=target. */
36908 static void
36909 rs6000_debug_target_options (tree args, const char *prefix)
36911 if (args == NULL_TREE)
36912 fprintf (stderr, "%s<NULL>", prefix);
36914 else if (TREE_CODE (args) == STRING_CST)
36916 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36917 char *q;
36919 while ((q = strtok (p, ",")) != NULL)
36921 p = NULL;
36922 fprintf (stderr, "%s\"%s\"", prefix, q);
36923 prefix = ", ";
36927 else if (TREE_CODE (args) == TREE_LIST)
36931 tree value = TREE_VALUE (args);
36932 if (value)
36934 rs6000_debug_target_options (value, prefix);
36935 prefix = ", ";
36937 args = TREE_CHAIN (args);
36939 while (args != NULL_TREE);
36942 else
36943 gcc_unreachable ();
36945 return;
36949 /* Hook to validate attribute((target("..."))). */
36951 static bool
36952 rs6000_valid_attribute_p (tree fndecl,
36953 tree ARG_UNUSED (name),
36954 tree args,
36955 int flags)
36957 struct cl_target_option cur_target;
36958 bool ret;
36959 tree old_optimize;
36960 tree new_target, new_optimize;
36961 tree func_optimize;
36963 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36965 if (TARGET_DEBUG_TARGET)
36967 tree tname = DECL_NAME (fndecl);
36968 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36969 if (tname)
36970 fprintf (stderr, "function: %.*s\n",
36971 (int) IDENTIFIER_LENGTH (tname),
36972 IDENTIFIER_POINTER (tname));
36973 else
36974 fprintf (stderr, "function: unknown\n");
36976 fprintf (stderr, "args:");
36977 rs6000_debug_target_options (args, " ");
36978 fprintf (stderr, "\n");
36980 if (flags)
36981 fprintf (stderr, "flags: 0x%x\n", flags);
36983 fprintf (stderr, "--------------------\n");
36986 /* attribute((target("default"))) does nothing, beyond
36987 affecting multi-versioning. */
36988 if (TREE_VALUE (args)
36989 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36990 && TREE_CHAIN (args) == NULL_TREE
36991 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36992 return true;
36994 old_optimize = build_optimization_node (&global_options);
36995 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36997 /* If the function changed the optimization levels as well as setting target
36998 options, start with the optimizations specified. */
36999 if (func_optimize && func_optimize != old_optimize)
37000 cl_optimization_restore (&global_options,
37001 TREE_OPTIMIZATION (func_optimize));
37003 /* The target attributes may also change some optimization flags, so update
37004 the optimization options if necessary. */
37005 cl_target_option_save (&cur_target, &global_options);
37006 rs6000_cpu_index = rs6000_tune_index = -1;
37007 ret = rs6000_inner_target_options (args, true);
37009 /* Set up any additional state. */
37010 if (ret)
37012 ret = rs6000_option_override_internal (false);
37013 new_target = build_target_option_node (&global_options);
37015 else
37016 new_target = NULL;
37018 new_optimize = build_optimization_node (&global_options);
37020 if (!new_target)
37021 ret = false;
37023 else if (fndecl)
37025 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
37027 if (old_optimize != new_optimize)
37028 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
37031 cl_target_option_restore (&global_options, &cur_target);
37033 if (old_optimize != new_optimize)
37034 cl_optimization_restore (&global_options,
37035 TREE_OPTIMIZATION (old_optimize));
37037 return ret;
37041 /* Hook to validate the current #pragma GCC target and set the state, and
37042 update the macros based on what was changed. If ARGS is NULL, then
37043 POP_TARGET is used to reset the options. */
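/* Illustrative sketch (not part of the original source) of the user-level
   push/pop pairing this hook services; on the pop, ARGS is NULL and
   POP_TARGET carries the saved options to restore.  */
#if 0
#pragma GCC push_options
#pragma GCC target ("power9-vector")
/* ... code compiled with ISA 3.0 vector support ... */
#pragma GCC pop_options
#endif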
37045 bool
37046 rs6000_pragma_target_parse (tree args, tree pop_target)
37048 tree prev_tree = build_target_option_node (&global_options);
37049 tree cur_tree;
37050 struct cl_target_option *prev_opt, *cur_opt;
37051 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
37052 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
37054 if (TARGET_DEBUG_TARGET)
37056 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
37057 fprintf (stderr, "args:");
37058 rs6000_debug_target_options (args, " ");
37059 fprintf (stderr, "\n");
37061 if (pop_target)
37063 fprintf (stderr, "pop_target:\n");
37064 debug_tree (pop_target);
37066 else
37067 fprintf (stderr, "pop_target: <NULL>\n");
37069 fprintf (stderr, "--------------------\n");
37072 if (! args)
37074 cur_tree = ((pop_target)
37075 ? pop_target
37076 : target_option_default_node);
37077 cl_target_option_restore (&global_options,
37078 TREE_TARGET_OPTION (cur_tree));
37080 else
37082 rs6000_cpu_index = rs6000_tune_index = -1;
37083 if (!rs6000_inner_target_options (args, false)
37084 || !rs6000_option_override_internal (false)
37085 || (cur_tree = build_target_option_node (&global_options))
37086 == NULL_TREE)
37088 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
37089 fprintf (stderr, "invalid pragma\n");
37091 return false;
37095 target_option_current_node = cur_tree;
37096 rs6000_activate_target_options (target_option_current_node);
37098 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
37099 change the macros that are defined. */
37100 if (rs6000_target_modify_macros_ptr)
37102 prev_opt = TREE_TARGET_OPTION (prev_tree);
37103 prev_bumask = prev_opt->x_rs6000_builtin_mask;
37104 prev_flags = prev_opt->x_rs6000_isa_flags;
37106 cur_opt = TREE_TARGET_OPTION (cur_tree);
37107 cur_flags = cur_opt->x_rs6000_isa_flags;
37108 cur_bumask = cur_opt->x_rs6000_builtin_mask;
37110 diff_bumask = (prev_bumask ^ cur_bumask);
37111 diff_flags = (prev_flags ^ cur_flags);
37113 if ((diff_flags != 0) || (diff_bumask != 0))
37115 /* Delete old macros. */
37116 rs6000_target_modify_macros_ptr (false,
37117 prev_flags & diff_flags,
37118 prev_bumask & diff_bumask);
37120 /* Define new macros. */
37121 rs6000_target_modify_macros_ptr (true,
37122 cur_flags & diff_flags,
37123 cur_bumask & diff_bumask);
37127 return true;
37131 /* Remember the last target of rs6000_set_current_function. */
37132 static GTY(()) tree rs6000_previous_fndecl;
37134 /* Restore target's globals from NEW_TREE and invalidate the
37135 rs6000_previous_fndecl cache. */
37137 void
37138 rs6000_activate_target_options (tree new_tree)
37140 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
37141 if (TREE_TARGET_GLOBALS (new_tree))
37142 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
37143 else if (new_tree == target_option_default_node)
37144 restore_target_globals (&default_target_globals);
37145 else
37146 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
37147 rs6000_previous_fndecl = NULL_TREE;
37150 /* Establish appropriate back-end context for processing the function
37151 FNDECL. The argument might be NULL to indicate processing at top
37152 level, outside of any function scope. */
37153 static void
37154 rs6000_set_current_function (tree fndecl)
37156 if (TARGET_DEBUG_TARGET)
37158 fprintf (stderr, "\n==================== rs6000_set_current_function");
37160 if (fndecl)
37161 fprintf (stderr, ", fndecl %s (%p)",
37162 (DECL_NAME (fndecl)
37163 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
37164 : "<unknown>"), (void *)fndecl);
37166 if (rs6000_previous_fndecl)
37167 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
37169 fprintf (stderr, "\n");
37172 /* Only change the context if the function changes. This hook is called
37173 several times in the course of compiling a function, and we don't want to
37174 slow things down too much or call target_reinit when it isn't safe. */
37175 if (fndecl == rs6000_previous_fndecl)
37176 return;
37178 tree old_tree;
37179 if (rs6000_previous_fndecl == NULL_TREE)
37180 old_tree = target_option_current_node;
37181 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
37182 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
37183 else
37184 old_tree = target_option_default_node;
37186 tree new_tree;
37187 if (fndecl == NULL_TREE)
37189 if (old_tree != target_option_current_node)
37190 new_tree = target_option_current_node;
37191 else
37192 new_tree = NULL_TREE;
37194 else
37196 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37197 if (new_tree == NULL_TREE)
37198 new_tree = target_option_default_node;
37201 if (TARGET_DEBUG_TARGET)
37203 if (new_tree)
37205 fprintf (stderr, "\nnew fndecl target specific options:\n");
37206 debug_tree (new_tree);
37209 if (old_tree)
37211 fprintf (stderr, "\nold fndecl target specific options:\n");
37212 debug_tree (old_tree);
37215 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
37216 fprintf (stderr, "--------------------\n");
37219 if (new_tree && old_tree != new_tree)
37220 rs6000_activate_target_options (new_tree);
37222 if (fndecl)
37223 rs6000_previous_fndecl = fndecl;
37227 /* Save the current options */
37229 static void
37230 rs6000_function_specific_save (struct cl_target_option *ptr,
37231 struct gcc_options *opts)
37233 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
37234 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
37237 /* Restore the current options */
37239 static void
37240 rs6000_function_specific_restore (struct gcc_options *opts,
37241 struct cl_target_option *ptr)
37244 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
37245 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
37246 (void) rs6000_option_override_internal (false);
37249 /* Print the current options */
37251 static void
37252 rs6000_function_specific_print (FILE *file, int indent,
37253 struct cl_target_option *ptr)
37255 rs6000_print_isa_options (file, indent, "Isa options set",
37256 ptr->x_rs6000_isa_flags);
37258 rs6000_print_isa_options (file, indent, "Isa options explicit",
37259 ptr->x_rs6000_isa_flags_explicit);
37262 /* Helper function to print the current isa or misc options on a line. */
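/* Sketch of the emitted format (illustrative, values assumed): for a mask
   word with ALTIVEC set and CRYPTO clear, the output looks roughly like

     Isa options set: 0x...: -maltivec, -mno-crypto, ...

   with a ", \" continuation emitted once a line passes max_column.  */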
37264 static void
37265 rs6000_print_options_internal (FILE *file,
37266 int indent,
37267 const char *string,
37268 HOST_WIDE_INT flags,
37269 const char *prefix,
37270 const struct rs6000_opt_mask *opts,
37271 size_t num_elements)
37273 size_t i;
37274 size_t start_column = 0;
37275 size_t cur_column;
37276 size_t max_column = 120;
37277 size_t prefix_len = strlen (prefix);
37278 size_t comma_len = 0;
37279 const char *comma = "";
37281 if (indent)
37282 start_column += fprintf (file, "%*s", indent, "");
37284 if (!flags)
37286 fprintf (stderr, DEBUG_FMT_S, string, "<none>");
37287 return;
37290 start_column += fprintf (stderr, DEBUG_FMT_WX, string, flags);
37292 /* Print the various mask options. */
37293 cur_column = start_column;
37294 for (i = 0; i < num_elements; i++)
37296 bool invert = opts[i].invert;
37297 const char *name = opts[i].name;
37298 const char *no_str = "";
37299 HOST_WIDE_INT mask = opts[i].mask;
37300 size_t len = comma_len + prefix_len + strlen (name);
37302 if (!invert)
37304 if ((flags & mask) == 0)
37306 no_str = "no-";
37307 len += sizeof ("no-") - 1;
37310 flags &= ~mask;
37313 else
37315 if ((flags & mask) != 0)
37317 no_str = "no-";
37318 len += sizeof ("no-") - 1;
37321 flags |= mask;
37324 cur_column += len;
37325 if (cur_column > max_column)
37327 fprintf (stderr, ", \\\n%*s", (int)start_column, "");
37328 cur_column = start_column + len;
37329 comma = "";
37332 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
37333 comma = ", ";
37334 comma_len = sizeof (", ") - 1;
37337 fputs ("\n", file);
37340 /* Helper function to print the current isa options on a line. */
37342 static void
37343 rs6000_print_isa_options (FILE *file, int indent, const char *string,
37344 HOST_WIDE_INT flags)
37346 rs6000_print_options_internal (file, indent, string, flags, "-m",
37347 &rs6000_opt_masks[0],
37348 ARRAY_SIZE (rs6000_opt_masks));
37351 static void
37352 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37353 HOST_WIDE_INT flags)
37355 rs6000_print_options_internal (file, indent, string, flags, "",
37356 &rs6000_builtin_mask_names[0],
37357 ARRAY_SIZE (rs6000_builtin_mask_names));
37360 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
37361 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37362 -mupper-regs-df, etc.).
37364 If the user used -mno-power8-vector, we need to turn off all of the implicit
37365 ISA 2.07 and 3.0 options that relate to the vector unit.
37367 If the user used -mno-power9-vector, we need to turn off all of the implicit
37368 ISA 3.0 options that relate to the vector unit.
37370 This function does not handle explicit options such as the user specifying
37371 -mdirect-move. These are handled in rs6000_option_override_internal, and
37372 the appropriate error is given if needed.
37374 We return a mask of all of the implicit options that should not be enabled
37375 by default. */
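/* Worked example (illustrative, not from the original source): with
   "-mcpu=power9 -mno-vsx", VSX is explicitly off, so the implicit vector
   options that depend on it (direct-move, power8-vector, power9-vector,
   ...) are folded into the returned ignore mask instead of being enabled
   by the -mcpu default.  Combining "-mno-vsx" with an explicit
   "-mpower8-vector" instead reaches the error () call below:

     error: '-mno-vsx' turns off '-mpower8-vector'  */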
37377 static HOST_WIDE_INT
37378 rs6000_disable_incompatible_switches (void)
37380 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37381 size_t i, j;
37383 static const struct {
37384 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37385 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37386 const char *const name; /* name of the switch. */
37387 } flags[] = {
37388 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37389 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37390 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37393 for (i = 0; i < ARRAY_SIZE (flags); i++)
37395 HOST_WIDE_INT no_flag = flags[i].no_flag;
37397 if ((rs6000_isa_flags & no_flag) == 0
37398 && (rs6000_isa_flags_explicit & no_flag) != 0)
37400 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37401 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37402 & rs6000_isa_flags
37403 & dep_flags);
37405 if (set_flags)
37407 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37408 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37410 set_flags &= ~rs6000_opt_masks[j].mask;
37411 error ("%<-mno-%s%> turns off %<-m%s%>",
37412 flags[i].name,
37413 rs6000_opt_masks[j].name);
37416 gcc_assert (!set_flags);
37419 rs6000_isa_flags &= ~dep_flags;
37420 ignore_masks |= no_flag | dep_flags;
37424 return ignore_masks;
37428 /* Helper function for printing the function name when debugging. */
37430 static const char *
37431 get_decl_name (tree fn)
37433 tree name;
37435 if (!fn)
37436 return "<null>";
37438 name = DECL_NAME (fn);
37439 if (!name)
37440 return "<no-name>";
37442 return IDENTIFIER_POINTER (name);
37445 /* Return the clone id of the target we are compiling code for in a target
37446 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37447 the priority list for the target clones (ordered from lowest to
37448 highest). */
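/* Illustrative only (assumed syntax for this port): a multi-versioned
   function whose clone bodies this priority ordering ranks.  */
#if 0
__attribute__((target_clones ("cpu=power9", "default")))
int example_clone_fn (int x) { return x + 1; }
#endif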
37450 static int
37451 rs6000_clone_priority (tree fndecl)
37453 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37454 HOST_WIDE_INT isa_masks;
37455 int ret = CLONE_DEFAULT;
37456 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37457 const char *attrs_str = NULL;
37459 attrs = TREE_VALUE (TREE_VALUE (attrs));
37460 attrs_str = TREE_STRING_POINTER (attrs);
37462 /* Return priority zero for the default function. Return the ISA needed for the
37463 function if it is not the default. */
37464 if (strcmp (attrs_str, "default") != 0)
37466 if (fn_opts == NULL_TREE)
37467 fn_opts = target_option_default_node;
37469 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37470 isa_masks = rs6000_isa_flags;
37471 else
37472 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37474 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37475 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37476 break;
37479 if (TARGET_DEBUG_TARGET)
37480 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
37481 get_decl_name (fndecl), ret);
37483 return ret;
37486 /* This compares the priority of target features in function DECL1 and DECL2.
37487 It returns positive value if DECL1 is higher priority, negative value if
37488 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37489 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37491 static int
37492 rs6000_compare_version_priority (tree decl1, tree decl2)
37494 int priority1 = rs6000_clone_priority (decl1);
37495 int priority2 = rs6000_clone_priority (decl2);
37496 int ret = priority1 - priority2;
37498 if (TARGET_DEBUG_TARGET)
37499 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37500 get_decl_name (decl1), get_decl_name (decl2), ret);
37502 return ret;
37505 /* Make a dispatcher declaration for the multi-versioned function DECL.
37506 Calls to DECL function will be replaced with calls to the dispatcher
37507 by the front-end. Returns the decl of the dispatcher function. */
37509 static tree
37510 rs6000_get_function_versions_dispatcher (void *decl)
37512 tree fn = (tree) decl;
37513 struct cgraph_node *node = NULL;
37514 struct cgraph_node *default_node = NULL;
37515 struct cgraph_function_version_info *node_v = NULL;
37516 struct cgraph_function_version_info *first_v = NULL;
37518 tree dispatch_decl = NULL;
37520 struct cgraph_function_version_info *default_version_info = NULL;
37521 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37523 if (TARGET_DEBUG_TARGET)
37524 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37525 get_decl_name (fn));
37527 node = cgraph_node::get (fn);
37528 gcc_assert (node != NULL);
37530 node_v = node->function_version ();
37531 gcc_assert (node_v != NULL);
37533 if (node_v->dispatcher_resolver != NULL)
37534 return node_v->dispatcher_resolver;
37536 /* Find the default version and make it the first node. */
37537 first_v = node_v;
37538 /* Go to the beginning of the chain. */
37539 while (first_v->prev != NULL)
37540 first_v = first_v->prev;
37542 default_version_info = first_v;
37543 while (default_version_info != NULL)
37545 const tree decl2 = default_version_info->this_node->decl;
37546 if (is_function_default_version (decl2))
37547 break;
37548 default_version_info = default_version_info->next;
37551 /* If there is no default node, just return NULL. */
37552 if (default_version_info == NULL)
37553 return NULL;
37555 /* Make default info the first node. */
37556 if (first_v != default_version_info)
37558 default_version_info->prev->next = default_version_info->next;
37559 if (default_version_info->next)
37560 default_version_info->next->prev = default_version_info->prev;
37561 first_v->prev = default_version_info;
37562 default_version_info->next = first_v;
37563 default_version_info->prev = NULL;
37566 default_node = default_version_info->this_node;
37568 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37569 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37570 "target_clones attribute needs GLIBC (2.23 and newer) that "
37571 "exports hardware capability bits");
37572 #else
37574 if (targetm.has_ifunc_p ())
37576 struct cgraph_function_version_info *it_v = NULL;
37577 struct cgraph_node *dispatcher_node = NULL;
37578 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37580 /* Right now, the dispatching is done via ifunc. */
37581 dispatch_decl = make_dispatcher_decl (default_node->decl);
37583 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37584 gcc_assert (dispatcher_node != NULL);
37585 dispatcher_node->dispatcher_function = 1;
37586 dispatcher_version_info
37587 = dispatcher_node->insert_new_function_version ();
37588 dispatcher_version_info->next = default_version_info;
37589 dispatcher_node->definition = 1;
37591 /* Set the dispatcher for all the versions. */
37592 it_v = default_version_info;
37593 while (it_v != NULL)
37595 it_v->dispatcher_resolver = dispatch_decl;
37596 it_v = it_v->next;
37599 else
37601 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37602 "multiversioning needs ifunc which is not supported "
37603 "on this target");
37605 #endif
37607 return dispatch_decl;
37610 /* Make the resolver function decl to dispatch the versions of a multi-
37611 versioned function, DEFAULT_DECL. Create an empty basic block in the
37612 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37613 function. */
37615 static tree
37616 make_resolver_func (const tree default_decl,
37617 const tree dispatch_decl,
37618 basic_block *empty_bb)
37620 /* Make the resolver function static. The resolver function returns
37621 void *. */
37622 tree decl_name = clone_function_name (default_decl, "resolver");
37623 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37624 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37625 tree decl = build_fn_decl (resolver_name, type);
37626 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37628 DECL_NAME (decl) = decl_name;
37629 TREE_USED (decl) = 1;
37630 DECL_ARTIFICIAL (decl) = 1;
37631 DECL_IGNORED_P (decl) = 0;
37632 TREE_PUBLIC (decl) = 0;
37633 DECL_UNINLINABLE (decl) = 1;
37635 /* Resolver is not external, body is generated. */
37636 DECL_EXTERNAL (decl) = 0;
37637 DECL_EXTERNAL (dispatch_decl) = 0;
37639 DECL_CONTEXT (decl) = NULL_TREE;
37640 DECL_INITIAL (decl) = make_node (BLOCK);
37641 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37643 /* Build result decl and add to function_decl. */
37644 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37645 DECL_ARTIFICIAL (t) = 1;
37646 DECL_IGNORED_P (t) = 1;
37647 DECL_RESULT (decl) = t;
37649 gimplify_function_tree (decl);
37650 push_cfun (DECL_STRUCT_FUNCTION (decl));
37651 *empty_bb = init_lowered_empty_function (decl, false,
37652 profile_count::uninitialized ());
37654 cgraph_node::add_new_function (decl, true);
37655 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37657 pop_cfun ();
37659 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37660 DECL_ATTRIBUTES (dispatch_decl)
37661 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37663 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37665 return decl;
37668 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37669 return a pointer to VERSION_DECL if we are running on a machine that
37670 supports the index CLONE_ISA hardware architecture bits. This function will
37671 be called during version dispatch to decide which function version to
37672 execute. It returns the basic block at the end, to which more conditions
37673 can be added. */
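/* Conceptual sketch (illustrative) of what each non-default call to this
   function appends to the resolver body:

     if (__builtin_cpu_supports ("<name from rs6000_clone_map[clone_isa]>"))
       return (void *) &version_decl;

   while the CLONE_DEFAULT case emits only the unconditional return.  */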
37675 static basic_block
37676 add_condition_to_bb (tree function_decl, tree version_decl,
37677 int clone_isa, basic_block new_bb)
37679 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37681 gcc_assert (new_bb != NULL);
37682 gimple_seq gseq = bb_seq (new_bb);
37685 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37686 build_fold_addr_expr (version_decl));
37687 tree result_var = create_tmp_var (ptr_type_node);
37688 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37689 gimple *return_stmt = gimple_build_return (result_var);
37691 if (clone_isa == CLONE_DEFAULT)
37693 gimple_seq_add_stmt (&gseq, convert_stmt);
37694 gimple_seq_add_stmt (&gseq, return_stmt);
37695 set_bb_seq (new_bb, gseq);
37696 gimple_set_bb (convert_stmt, new_bb);
37697 gimple_set_bb (return_stmt, new_bb);
37698 pop_cfun ();
37699 return new_bb;
37702 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37703 tree cond_var = create_tmp_var (bool_int_type_node);
37704 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37705 const char *arg_str = rs6000_clone_map[clone_isa].name;
37706 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37707 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37708 gimple_call_set_lhs (call_cond_stmt, cond_var);
37710 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37711 gimple_set_bb (call_cond_stmt, new_bb);
37712 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37714 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37715 NULL_TREE, NULL_TREE);
37716 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37717 gimple_set_bb (if_else_stmt, new_bb);
37718 gimple_seq_add_stmt (&gseq, if_else_stmt);
37720 gimple_seq_add_stmt (&gseq, convert_stmt);
37721 gimple_seq_add_stmt (&gseq, return_stmt);
37722 set_bb_seq (new_bb, gseq);
37724 basic_block bb1 = new_bb;
37725 edge e12 = split_block (bb1, if_else_stmt);
37726 basic_block bb2 = e12->dest;
37727 e12->flags &= ~EDGE_FALLTHRU;
37728 e12->flags |= EDGE_TRUE_VALUE;
37730 edge e23 = split_block (bb2, return_stmt);
37731 gimple_set_bb (convert_stmt, bb2);
37732 gimple_set_bb (return_stmt, bb2);
37734 basic_block bb3 = e23->dest;
37735 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37737 remove_edge (e23);
37738 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37740 pop_cfun ();
37741 return bb3;
37744 /* This function generates the dispatch function for multi-versioned functions.
37745 DISPATCH_DECL is the function which will contain the dispatch logic.
37746 FNDECLS are the function choices for dispatch, and is a tree chain.
37747 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37748 code is generated. */
37750 static int
37751 dispatch_function_versions (tree dispatch_decl,
37752 void *fndecls_p,
37753 basic_block *empty_bb)
37755 int ix;
37756 tree ele;
37757 vec<tree> *fndecls;
37758 tree clones[CLONE_MAX];
37760 if (TARGET_DEBUG_TARGET)
37761 fputs ("dispatch_function_versions, top\n", stderr);
37763 gcc_assert (dispatch_decl != NULL
37764 && fndecls_p != NULL
37765 && empty_bb != NULL);
37767 /* fndecls_p is actually a vector. */
37768 fndecls = static_cast<vec<tree> *> (fndecls_p);
37770 /* At least one more version other than the default. */
37771 gcc_assert (fndecls->length () >= 2);
37773 /* The first version in the vector is the default decl. */
37774 memset ((void *) clones, '\0', sizeof (clones));
37775 clones[CLONE_DEFAULT] = (*fndecls)[0];
37777 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37778 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37779 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37780 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37781 to insert the code here to do the call. */
37783 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37785 int priority = rs6000_clone_priority (ele);
37786 if (!clones[priority])
37787 clones[priority] = ele;
37790 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37791 if (clones[ix])
37793 if (TARGET_DEBUG_TARGET)
37794 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37795 ix, get_decl_name (clones[ix]));
37797 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37798 *empty_bb);
37801 return 0;
37804 /* Generate the dispatching code body to dispatch multi-versioned function
37805 DECL. The target hook is called to process the "target" attributes and
37806 provide the code to dispatch the right function at run-time. NODE points
37807 to the dispatcher decl whose body will be created. */
37809 static tree
37810 rs6000_generate_version_dispatcher_body (void *node_p)
37812 tree resolver;
37813 basic_block empty_bb;
37814 struct cgraph_node *node = (cgraph_node *) node_p;
37815 struct cgraph_function_version_info *ninfo = node->function_version ();
37817 if (ninfo->dispatcher_resolver)
37818 return ninfo->dispatcher_resolver;
37820 /* node is going to be an alias, so remove the finalized bit. */
37821 node->definition = false;
37823 /* The first version in the chain corresponds to the default version. */
37824 ninfo->dispatcher_resolver = resolver
37825 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37827 if (TARGET_DEBUG_TARGET)
37828 fprintf (stderr, "rs6000_generate_version_dispatcher_body, %s\n",
37829 get_decl_name (resolver));
37831 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37832 auto_vec<tree, 2> fn_ver_vec;
37834 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37835 vinfo;
37836 vinfo = vinfo->next)
37838 struct cgraph_node *version = vinfo->this_node;
37839 /* Check for virtual functions here again, as by this time it should
37840 have been determined if this function needs a vtable index or
37841 not. This happens for methods in derived classes that override
37842 virtual methods in base classes but are not explicitly marked as
37843 virtual. */
37844 if (DECL_VINDEX (version->decl))
37845 sorry ("virtual function multiversioning not supported");
37847 fn_ver_vec.safe_push (version->decl);
37850 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37851 cgraph_edge::rebuild_edges ();
37852 pop_cfun ();
37853 return resolver;
37857 /* Hook to determine if one function can safely inline another. */
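/* Worked example (illustrative): if the caller's ISA flags include
   {VSX, ALTIVEC} and the callee's only {ALTIVEC}, the subset test below
   ((caller & callee) == callee) holds and inlining is allowed; with the
   sets swapped the test fails and the callee stays out of line.  */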
37859 static bool
37860 rs6000_can_inline_p (tree caller, tree callee)
37862 bool ret = false;
37863 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37864 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37866 /* If callee has no option attributes, then it is ok to inline. */
37867 if (!callee_tree)
37868 ret = true;
37870 /* If caller has no option attributes, but callee does then it is not ok to
37871 inline. */
37872 else if (!caller_tree)
37873 ret = false;
37875 else
37877 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37878 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37880 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37881 can inline an altivec function but a non-vsx function can't inline a
37882 vsx function. */
37883 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37884 == callee_opts->x_rs6000_isa_flags)
37885 ret = true;
37888 if (TARGET_DEBUG_TARGET)
37889 fprintf (stderr, "rs6000_can_inline_p, caller %s, callee %s, %s inline\n",
37890 get_decl_name (caller), get_decl_name (callee),
37891 (ret ? "can" : "cannot"));
37893 return ret;
37896 /* Allocate a stack temp and fix up the address so it meets the particular
37897 memory requirements (either offsettable or REG+REG addressing). */
37900 rs6000_allocate_stack_temp (machine_mode mode,
37901 bool offsettable_p,
37902 bool reg_reg_p)
37904 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37905 rtx addr = XEXP (stack, 0);
37906 int strict_p = reload_completed;
37908 if (!legitimate_indirect_address_p (addr, strict_p))
37910 if (offsettable_p
37911 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37912 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37914 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37915 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37918 return stack;
37921 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
37922 to such a form to deal with memory reference instructions like STFIWX that
37923 only take reg+reg addressing. */
37926 rs6000_address_for_fpconvert (rtx x)
37928 rtx addr;
37930 gcc_assert (MEM_P (x));
37931 addr = XEXP (x, 0);
37932 if (! legitimate_indirect_address_p (addr, reload_completed)
37933 && ! legitimate_indexed_address_p (addr, reload_completed))
37935 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37937 rtx reg = XEXP (addr, 0);
37938 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37939 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37940 gcc_assert (REG_P (reg));
37941 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37942 addr = reg;
37944 else if (GET_CODE (addr) == PRE_MODIFY)
37946 rtx reg = XEXP (addr, 0);
37947 rtx expr = XEXP (addr, 1);
37948 gcc_assert (REG_P (reg));
37949 gcc_assert (GET_CODE (expr) == PLUS);
37950 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37951 addr = reg;
37954 x = replace_equiv_address (x, copy_addr_to_reg (addr));
37957 return x;
37960 /* Given a memory reference, if it is not in the form for altivec memory
37961 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
37962 convert to the altivec format. */
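/* Illustrative sketch: an address such as (plus (reg) (const_int 20)) is
   neither indirect nor indexed, so it is copied into a register and then
   wrapped as (and (reg) (const_int -16)), mirroring the 16-byte address
   truncation that the AltiVec lvx/stvx instructions perform.  */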
37965 rs6000_address_for_altivec (rtx x)
37967 gcc_assert (MEM_P (x));
37968 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
37970 rtx addr = XEXP (x, 0);
37972 if (!legitimate_indexed_address_p (addr, reload_completed)
37973 && !legitimate_indirect_address_p (addr, reload_completed))
37974 addr = copy_to_mode_reg (Pmode, addr);
37976 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
37977 x = change_address (x, GET_MODE (x), addr);
37980 return x;
37983 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37985 On the RS/6000, all integer constants are acceptable, most won't be valid
37986 for particular insns, though. Only easy FP constants are acceptable. */
37988 static bool
37989 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37991 if (TARGET_ELF && tls_referenced_p (x))
37992 return false;
37994 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
37995 || GET_MODE (x) == VOIDmode
37996 || (TARGET_POWERPC64 && mode == DImode)
37997 || easy_fp_constant (x, mode)
37998 || easy_vector_constant (x, mode));
38002 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
38004 static bool
38005 chain_already_loaded (rtx_insn *last)
38007 for (; last != NULL; last = PREV_INSN (last))
38009 if (NONJUMP_INSN_P (last))
38011 rtx patt = PATTERN (last);
38013 if (GET_CODE (patt) == SET)
38015 rtx lhs = XEXP (patt, 0);
38017 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
38018 return true;
38022 return false;
38025 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
38027 void
38028 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
38030 const bool direct_call_p
38031 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
38032 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
38033 rtx toc_load = NULL_RTX;
38034 rtx toc_restore = NULL_RTX;
38035 rtx func_addr;
38036 rtx abi_reg = NULL_RTX;
38037 rtx call[4];
38038 int n_call;
38039 rtx insn;
38041 /* Handle longcall attributes. */
38042 if (INTVAL (cookie) & CALL_LONG)
38043 func_desc = rs6000_longcall_ref (func_desc);
38045 /* Handle indirect calls. */
38046 if (GET_CODE (func_desc) != SYMBOL_REF
38047 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
38049 /* Save the TOC into its reserved slot before the call,
38050 and prepare to restore it after the call. */
38051 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
38052 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
38053 rtx stack_toc_mem = gen_frame_mem (Pmode,
38054 gen_rtx_PLUS (Pmode, stack_ptr,
38055 stack_toc_offset));
38056 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
38057 gen_rtvec (1, stack_toc_offset),
38058 UNSPEC_TOCSLOT);
38059 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
38061 /* Can we optimize saving the TOC in the prologue or
38062 do we need to do it at every call? */
38063 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
38064 cfun->machine->save_toc_in_prologue = true;
38065 else
38067 MEM_VOLATILE_P (stack_toc_mem) = 1;
38068 emit_move_insn (stack_toc_mem, toc_reg);
38071 if (DEFAULT_ABI == ABI_ELFv2)
38073 /* A function pointer in the ELFv2 ABI is just a plain address, but
38074 the ABI requires it to be loaded into r12 before the call. */
38075 func_addr = gen_rtx_REG (Pmode, 12);
38076 emit_move_insn (func_addr, func_desc);
38077 abi_reg = func_addr;
38079 else
38081 /* A function pointer under AIX is a pointer to a data area whose
38082 first word contains the actual address of the function, whose
38083 second word contains a pointer to its TOC, and whose third word
38084 contains a value to place in the static chain register (r11).
38085 Note that if we load the static chain, our "trampoline" need
38086 not have any executable code. */
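/* Sketch of the AIX descriptor layout dereferenced below (illustrative,
   not a declaration from this file):
     word 0: entry-point address   -> func_addr
     word 1: callee's TOC pointer  -> toc_load
     word 2: static chain value    -> r11, when loaded  */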
38088 /* Load up address of the actual function. */
38089 func_desc = force_reg (Pmode, func_desc);
38090 func_addr = gen_reg_rtx (Pmode);
38091 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
38093 /* Prepare to load the TOC of the called function. Note that the
38094 TOC load must happen immediately before the actual call so
38095 that unwinding the TOC registers works correctly. See the
38096 comment in frob_update_context. */
38097 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
38098 rtx func_toc_mem = gen_rtx_MEM (Pmode,
38099 gen_rtx_PLUS (Pmode, func_desc,
38100 func_toc_offset));
38101 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
38103 /* If we have a static chain, load it up. But, if the call was
38104 originally direct, the 3rd word has not been written since no
38105 trampoline has been built, so we ought not to load it, lest we
38106 override a static chain value. */
38107 if (!direct_call_p
38108 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
38109 && !chain_already_loaded (get_current_sequence ()->next->last))
38111 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
38112 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
38113 rtx func_sc_mem = gen_rtx_MEM (Pmode,
38114 gen_rtx_PLUS (Pmode, func_desc,
38115 func_sc_offset));
38116 emit_move_insn (sc_reg, func_sc_mem);
38117 abi_reg = sc_reg;
38121 else
38123 /* Direct calls use the TOC: for local calls, the callee will
38124 assume the TOC register is set; for non-local calls, the
38125 PLT stub needs the TOC register. */
38126 abi_reg = toc_reg;
38127 func_addr = func_desc;
38130 /* Create the call. */
38131 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
38132 if (value != NULL_RTX)
38133 call[0] = gen_rtx_SET (value, call[0]);
38134 n_call = 1;
38136 if (toc_load)
38137 call[n_call++] = toc_load;
38138 if (toc_restore)
38139 call[n_call++] = toc_restore;
38141 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
38143 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
38144 insn = emit_call_insn (insn);
38146 /* Mention all registers defined by the ABI to hold information
38147 as uses in CALL_INSN_FUNCTION_USAGE. */
38148 if (abi_reg)
38149 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38152 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
38154 void
38155 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
38157 rtx call[2];
38158 rtx insn;
38160 gcc_assert (INTVAL (cookie) == 0);
38162 /* Create the call. */
38163 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
38164 if (value != NULL_RTX)
38165 call[0] = gen_rtx_SET (value, call[0]);
38167 call[1] = simple_return_rtx;
38169 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
38170 insn = emit_call_insn (insn);
38172 /* Note use of the TOC register. */
38173 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
38176 /* Return whether we need to always update the saved TOC pointer when we update
38177 the stack pointer. */
38179 static bool
38180 rs6000_save_toc_in_prologue_p (void)
38182 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38185 #ifdef HAVE_GAS_HIDDEN
38186 # define USE_HIDDEN_LINKONCE 1
38187 #else
38188 # define USE_HIDDEN_LINKONCE 0
38189 #endif
38191 /* Fills in the label name that should be used for a 476 link stack thunk. */
38193 void
38194 get_ppc476_thunk_name (char name[32])
38196 gcc_assert (TARGET_LINK_STACK);
38198 if (USE_HIDDEN_LINKONCE)
38199 sprintf (name, "__ppc476.get_thunk");
38200 else
38201 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38204 /* This function emits the simple thunk routine that is used to preserve
38205 the link stack on the 476 cpu. */
38207 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38208 static void
38209 rs6000_code_end (void)
38211 char name[32];
38212 tree decl;
38214 if (!TARGET_LINK_STACK)
38215 return;
38217 get_ppc476_thunk_name (name);
38219 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38220 build_function_type_list (void_type_node, NULL_TREE));
38221 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38222 NULL_TREE, void_type_node);
38223 TREE_PUBLIC (decl) = 1;
38224 TREE_STATIC (decl) = 1;
38226 #if RS6000_WEAK
38227 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38229 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38230 targetm.asm_out.unique_section (decl, 0);
38231 switch_to_section (get_named_section (decl, NULL, 0));
38232 DECL_WEAK (decl) = 1;
38233 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38234 targetm.asm_out.globalize_label (asm_out_file, name);
38235 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38236 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38238 else
38239 #endif
38241 switch_to_section (text_section);
38242 ASM_OUTPUT_LABEL (asm_out_file, name);
38245 DECL_INITIAL (decl) = make_node (BLOCK);
38246 current_function_decl = decl;
38247 allocate_struct_function (decl, false);
38248 init_function_start (decl);
38249 first_function_block_is_cold = false;
38250 /* Make sure unwind info is emitted for the thunk if needed. */
38251 final_start_function (emit_barrier (), asm_out_file, 1);
38253 fputs ("\tblr\n", asm_out_file);
38255 final_end_function ();
38256 init_insn_lengths ();
38257 free_after_compilation (cfun);
38258 set_cfun (NULL);
38259 current_function_decl = NULL;
38262 /* Add r30 to hard reg set if the prologue sets it up and it is not
38263 pic_offset_table_rtx. */
38265 static void
38266 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38268 if (!TARGET_SINGLE_PIC_BASE
38269 && TARGET_TOC
38270 && TARGET_MINIMAL_TOC
38271 && !constant_pool_empty_p ())
38272 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38273 if (cfun->machine->split_stack_argp_used)
38274 add_to_hard_reg_set (&set->set, Pmode, 12);
38276 /* Make sure the hard reg set doesn't include r2, which was possibly added
38277 via PIC_OFFSET_TABLE_REGNUM. */
38278 if (TARGET_TOC)
38279 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38283 /* Helper function for rs6000_split_logical to emit a logical instruction after
38284 splitting the operation into single GPR registers.
38286 DEST is the destination register.
38287 OP1 and OP2 are the input source registers.
38288 CODE is the base operation (AND, IOR, XOR, NOT).
38289 MODE is the machine mode.
38290 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38291 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38292 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38294 static void
38295 rs6000_split_logical_inner (rtx dest,
38296 rtx op1,
38297 rtx op2,
38298 enum rtx_code code,
38299 machine_mode mode,
38300 bool complement_final_p,
38301 bool complement_op1_p,
38302 bool complement_op2_p)
38304 rtx bool_rtx;
38306 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38307 if (op2 && GET_CODE (op2) == CONST_INT
38308 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38309 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38311 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38312 HOST_WIDE_INT value = INTVAL (op2) & mask;
38314 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38315 if (code == AND)
38317 if (value == 0)
38319 emit_insn (gen_rtx_SET (dest, const0_rtx));
38320 return;
38323 else if (value == mask)
38325 if (!rtx_equal_p (dest, op1))
38326 emit_insn (gen_rtx_SET (dest, op1));
38327 return;
38331 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38332 into separate ORI/ORIS or XORI/XORIS instructions. */
38333 else if (code == IOR || code == XOR)
38335 if (value == 0)
38337 if (!rtx_equal_p (dest, op1))
38338 emit_insn (gen_rtx_SET (dest, op1));
38339 return;
38344 if (code == AND && mode == SImode
38345 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38347 emit_insn (gen_andsi3 (dest, op1, op2));
38348 return;
38351 if (complement_op1_p)
38352 op1 = gen_rtx_NOT (mode, op1);
38354 if (complement_op2_p)
38355 op2 = gen_rtx_NOT (mode, op2);
38357 /* For canonical RTL, if only one arm is inverted it is the first. */
38358 if (!complement_op1_p && complement_op2_p)
38359 std::swap (op1, op2);
38361 bool_rtx = ((code == NOT)
38362 ? gen_rtx_NOT (mode, op1)
38363 : gen_rtx_fmt_ee (code, mode, op1, op2));
38365 if (complement_final_p)
38366 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38368 emit_insn (gen_rtx_SET (dest, bool_rtx));
38371 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38372 operations are split immediately during RTL generation to allow for more
38373 optimizations of the AND/IOR/XOR.
38375 OPERANDS is an array containing the destination and two input operands.
38376 CODE is the base operation (AND, IOR, XOR, NOT).
38377 MODE is the machine mode.
38378 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38379 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38380 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
38381 CLOBBER_REG is either NULL or a scratch register of type CC to allow
38382 formation of the AND instructions. */
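/* Worked example (illustrative): on a 32-bit target,

     (set (reg:DI d) (ior:DI (reg:DI a) (const_int 0x12345678)))

   splits into one SImode operation per half; the high half is IOR with 0
   (a plain move), while the low constant 0x12345678 is not a
   logical_const_operand and is further split into an ORIS of 0x1234
   followed by an ORI of 0x5678.  */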
38384 static void
38385 rs6000_split_logical_di (rtx operands[3],
38386 enum rtx_code code,
38387 bool complement_final_p,
38388 bool complement_op1_p,
38389 bool complement_op2_p)
38391 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
38392 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
38393 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
38394 enum hi_lo { hi = 0, lo = 1 };
38395 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38396 size_t i;
38398 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38399 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38400 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38401 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38403 if (code == NOT)
38404 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38405 else
38407 if (GET_CODE (operands[2]) != CONST_INT)
38409 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38410 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38412 else
38414 HOST_WIDE_INT value = INTVAL (operands[2]);
38415 HOST_WIDE_INT value_hi_lo[2];
38417 gcc_assert (!complement_final_p);
38418 gcc_assert (!complement_op1_p);
38419 gcc_assert (!complement_op2_p);
38421 value_hi_lo[hi] = value >> 32;
38422 value_hi_lo[lo] = value & lower_32bits;
38424 for (i = 0; i < 2; i++)
38426 HOST_WIDE_INT sub_value = value_hi_lo[i];
38428 if (sub_value & sign_bit)
38429 sub_value |= upper_32bits;
38431 op2_hi_lo[i] = GEN_INT (sub_value);
38433 /* If this is an AND instruction, check to see if we need to load
38434 the value in a register. */
38435 if (code == AND && sub_value != -1 && sub_value != 0
38436 && !and_operand (op2_hi_lo[i], SImode))
38437 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38442 for (i = 0; i < 2; i++)
38444 /* Split large IOR/XOR operations. */
38445 if ((code == IOR || code == XOR)
38446 && GET_CODE (op2_hi_lo[i]) == CONST_INT
38447 && !complement_final_p
38448 && !complement_op1_p
38449 && !complement_op2_p
38450 && !logical_const_operand (op2_hi_lo[i], SImode))
38452 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38453 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
38454 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
38455 rtx tmp = gen_reg_rtx (SImode);
38457 /* Make sure the constant is sign extended. */
38458 if ((hi_16bits & sign_bit) != 0)
38459 hi_16bits |= upper_32bits;
38461 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38462 code, SImode, false, false, false);
38464 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38465 code, SImode, false, false, false);
38467 else
38468 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38469 code, SImode, complement_final_p,
38470 complement_op1_p, complement_op2_p);
38473 return;
38476 /* Split the insns that make up boolean operations operating on multiple GPR
38477 registers. The boolean MD patterns ensure that the inputs either are
38478 exactly the same as the output registers, or there is no overlap.
38480 OPERANDS is an array containing the destination and two input operands.
38481 CODE is the base operation (AND, IOR, XOR, NOT).
38482 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38483 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38484 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38486 void
38487 rs6000_split_logical (rtx operands[3],
38488 enum rtx_code code,
38489 bool complement_final_p,
38490 bool complement_op1_p,
38491 bool complement_op2_p)
38493 machine_mode mode = GET_MODE (operands[0]);
38494 machine_mode sub_mode;
38495 rtx op0, op1, op2;
38496 int sub_size, regno0, regno1, nregs, i;
38498 /* If this is DImode, use the specialized version that can run before
38499 register allocation. */
38500 if (mode == DImode && !TARGET_POWERPC64)
38502 rs6000_split_logical_di (operands, code, complement_final_p,
38503 complement_op1_p, complement_op2_p);
38504 return;
38507 op0 = operands[0];
38508 op1 = operands[1];
38509 op2 = (code == NOT) ? NULL_RTX : operands[2];
38510 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38511 sub_size = GET_MODE_SIZE (sub_mode);
38512 regno0 = REGNO (op0);
38513 regno1 = REGNO (op1);
38515 gcc_assert (reload_completed);
38516 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38517 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38519 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38520 gcc_assert (nregs > 1);
38522 if (op2 && REG_P (op2))
38523 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38525 for (i = 0; i < nregs; i++)
38527 int offset = i * sub_size;
38528 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38529 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38530 rtx sub_op2 = ((code == NOT)
38531 ? NULL_RTX
38532 : simplify_subreg (sub_mode, op2, mode, offset));
38534 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38535 complement_final_p, complement_op1_p,
38536 complement_op2_p);
38539 return;
38543 /* Return true if the peephole2 can combine a load involving a combination of
38544 an addis instruction and a load with an offset that can be fused together on
38545 a power8. */
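/* Illustrative sketch (assembly assumed, not from the original source) of
   the two-insn shape the peephole looks for:

     addis 9,2,.LC0@toc@ha    ; addis_reg/addis_value
     lwz   9,.LC0@toc@l(9)    ; target loaded via the addis base

   so that the pair can be fused into one dispatch group on power8.  */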
38547 bool
38548 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38549 rtx addis_value, /* addis value. */
38550 rtx target, /* target register that is loaded. */
38551 rtx mem) /* bottom part of the memory addr. */
38553 rtx addr;
38554 rtx base_reg;
38556 /* Validate arguments. */
38557 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38558 return false;
38560 if (!base_reg_operand (target, GET_MODE (target)))
38561 return false;
38563 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38564 return false;
38566 /* Allow sign/zero extension. */
38567 if (GET_CODE (mem) == ZERO_EXTEND
38568 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38569 mem = XEXP (mem, 0);
38571 if (!MEM_P (mem))
38572 return false;
38574 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38575 return false;
38577 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38578 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38579 return false;
38581 /* Validate that the register used to load the high value is either the
38582 register being loaded, or one whose use we can safely replace.
38584 This function is only called from the peephole2 pass, and we assume that
38585 there are 2 instructions in the peephole (addis and load), so we check
38586 that the target register is not used in the memory address and that the
38587 register holding the addis result is dead after the peephole. */
38588 if (REGNO (addis_reg) != REGNO (target))
38590 if (reg_mentioned_p (target, mem))
38591 return false;
38593 if (!peep2_reg_dead_p (2, addis_reg))
38594 return false;
38596 /* If the target register being loaded is the stack pointer, we must
38597 avoid loading any other value into it, even temporarily. */
38598 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38599 return false;
38602 base_reg = XEXP (addr, 0);
38603 return REGNO (addis_reg) == REGNO (base_reg);
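/* Sketch of the insn pair the predicate above accepts (register numbers
   are purely illustrative):

	addis 9,2,sym@toc@ha	# addis_reg set from the TOC pointer
	lwz 9,sym@toc@l(9)	# target loaded via the same register

   Because the load overwrites the addis result, power8 can fuse the two
   instructions into a single operation.  */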
38606 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38607 sequence. We adjust the addis register to use the target register. If the
38608 load sign extends, we change the code to do a zero-extending load followed
38609 by an explicit sign extension, since the fusion only covers zero-extending
38610 loads.
38612 The operands are:
38613 operands[0] register set with addis (to be replaced with target)
38614 operands[1] value set via addis
38615 operands[2] target register being loaded
38616 operands[3] D-form memory reference using operands[0]. */
38618 void
38619 expand_fusion_gpr_load (rtx *operands)
38621 rtx addis_value = operands[1];
38622 rtx target = operands[2];
38623 rtx orig_mem = operands[3];
38624 rtx new_addr, new_mem, orig_addr, offset;
38625 enum rtx_code plus_or_lo_sum;
38626 machine_mode target_mode = GET_MODE (target);
38627 machine_mode extend_mode = target_mode;
38628 machine_mode ptr_mode = Pmode;
38629 enum rtx_code extend = UNKNOWN;
38631 if (GET_CODE (orig_mem) == ZERO_EXTEND
38632 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38634 extend = GET_CODE (orig_mem);
38635 orig_mem = XEXP (orig_mem, 0);
38636 target_mode = GET_MODE (orig_mem);
38639 gcc_assert (MEM_P (orig_mem));
38641 orig_addr = XEXP (orig_mem, 0);
38642 plus_or_lo_sum = GET_CODE (orig_addr);
38643 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38645 offset = XEXP (orig_addr, 1);
38646 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38647 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38649 if (extend != UNKNOWN)
38650 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38652 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38653 UNSPEC_FUSION_GPR);
38654 emit_insn (gen_rtx_SET (target, new_mem));
38656 if (extend == SIGN_EXTEND)
38658 int sub_off = ((BYTES_BIG_ENDIAN)
38659 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38660 : 0);
38661 rtx sign_reg
38662 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38664 emit_insn (gen_rtx_SET (target,
38665 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38668 return;
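/* For example (a sketch of the rewrite, not verbatim output): a
   sign-extending halfword load cannot be fused directly, so it becomes a
   zero-extending fused load followed by an explicit sign extension:

	addis 9,2,sym@toc@ha
	lhz 9,sym@toc@l(9)
	extsh 9,9  */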
38671 /* Emit the addis instruction that will be part of a fused instruction
38672 sequence. */
38674 void
38675 emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
38676 const char *mode_name)
38678 rtx fuse_ops[10];
38679 char insn_template[80];
38680 const char *addis_str = NULL;
38681 const char *comment_str = ASM_COMMENT_START;
38683 if (*comment_str == ' ')
38684 comment_str++;
38686 /* Emit the addis instruction. */
38687 fuse_ops[0] = target;
38688 if (satisfies_constraint_L (addis_value))
38690 fuse_ops[1] = addis_value;
38691 addis_str = "lis %0,%v1";
38694 else if (GET_CODE (addis_value) == PLUS)
38696 rtx op0 = XEXP (addis_value, 0);
38697 rtx op1 = XEXP (addis_value, 1);
38699 if (REG_P (op0) && CONST_INT_P (op1)
38700 && satisfies_constraint_L (op1))
38702 fuse_ops[1] = op0;
38703 fuse_ops[2] = op1;
38704 addis_str = "addis %0,%1,%v2";
38708 else if (GET_CODE (addis_value) == HIGH)
38710 rtx value = XEXP (addis_value, 0);
38711 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38713 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38714 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38715 if (TARGET_ELF)
38716 addis_str = "addis %0,%2,%1@toc@ha";
38718 else if (TARGET_XCOFF)
38719 addis_str = "addis %0,%1@u(%2)";
38721 else
38722 gcc_unreachable ();
38725 else if (GET_CODE (value) == PLUS)
38727 rtx op0 = XEXP (value, 0);
38728 rtx op1 = XEXP (value, 1);
38730 if (GET_CODE (op0) == UNSPEC
38731 && XINT (op0, 1) == UNSPEC_TOCREL
38732 && CONST_INT_P (op1))
38734 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38735 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38736 fuse_ops[3] = op1;
38737 if (TARGET_ELF)
38738 addis_str = "addis %0,%2,%1+%3@toc@ha";
38740 else if (TARGET_XCOFF)
38741 addis_str = "addis %0,%1+%3@u(%2)";
38743 else
38744 gcc_unreachable ();
38748 else if (satisfies_constraint_L (value))
38750 fuse_ops[1] = value;
38751 addis_str = "lis %0,%v1";
38754 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38756 fuse_ops[1] = value;
38757 addis_str = "lis %0,%1@ha";
38761 if (!addis_str)
38762 fatal_insn ("Could not generate addis value for fusion", addis_value);
38764 sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
38765 comment, mode_name);
38766 output_asm_insn (insn_template, fuse_ops);
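/* For example, the lis form of the template above prints one assembly line
   of the shape (a sketch; exact operand formatting depends on the %v
   output modifier and the assembler comment string):

	lis 9,<high 16 bits>		# gpr load fusion, type int  */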
38769 /* Emit a D-form load or store instruction that is the second instruction
38770 of a fusion sequence. */
38772 void
38773 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
38774 const char *insn_str)
38776 rtx fuse_ops[10];
38777 char insn_template[80];
38779 fuse_ops[0] = load_store_reg;
38780 fuse_ops[1] = addis_reg;
38782 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38784 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38785 fuse_ops[2] = offset;
38786 output_asm_insn (insn_template, fuse_ops);
38789 else if (GET_CODE (offset) == UNSPEC
38790 && XINT (offset, 1) == UNSPEC_TOCREL)
38792 if (TARGET_ELF)
38793 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38795 else if (TARGET_XCOFF)
38796 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38798 else
38799 gcc_unreachable ();
38801 fuse_ops[2] = XVECEXP (offset, 0, 0);
38802 output_asm_insn (insn_template, fuse_ops);
38805 else if (GET_CODE (offset) == PLUS
38806 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38807 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38808 && CONST_INT_P (XEXP (offset, 1)))
38810 rtx tocrel_unspec = XEXP (offset, 0);
38811 if (TARGET_ELF)
38812 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38814 else if (TARGET_XCOFF)
38815 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38817 else
38818 gcc_unreachable ();
38820 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38821 fuse_ops[3] = XEXP (offset, 1);
38822 output_asm_insn (insn_template, fuse_ops);
38825 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38827 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38829 fuse_ops[2] = offset;
38830 output_asm_insn (insn_template, fuse_ops);
38833 else
38834 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38836 return;
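/* The offset shapes handled above print, schematically (a sketch with
   illustrative operands, ELF templates shown):

	lwz 9,-32768(9)		# CONST_INT offset, constraint I
	lwz 9,sym@toc@l(9)	# UNSPEC_TOCREL offset
	lwz 9,sym+8@toc@l(9)	# UNSPEC_TOCREL plus a constant  */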
38839 /* Wrap a TOC address that can be fused to indicate that special fusion
38840 processing is needed. */
38842 static rtx
38843 fusion_wrap_memory_address (rtx old_mem)
38845 rtx old_addr = XEXP (old_mem, 0);
38846 rtvec v = gen_rtvec (1, old_addr);
38847 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
38848 return replace_equiv_address_nv (old_mem, new_addr, false);
38851 /* Given an address, convert it into the addis and load offset parts. Addresses
38852 created during the peephole2 process look like:
38853 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38854 (unspec [(...)] UNSPEC_TOCREL))
38856 Addresses created via toc fusion look like:
38857 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS) */
38859 static void
38860 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38862 rtx hi, lo;
38864 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
38866 lo = XVECEXP (addr, 0, 0);
38867 hi = gen_rtx_HIGH (Pmode, lo);
38869 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38871 hi = XEXP (addr, 0);
38872 lo = XEXP (addr, 1);
38874 else
38875 gcc_unreachable ();
38877 *p_hi = hi;
38878 *p_lo = lo;
38881 /* Return a string to fuse an addis instruction with a GPR load into the
38882 same register that the addis instruction set. The address used is the
38883 logical address that was formed during peephole2:
38884 (lo_sum (high) (low-part))
38886 Or the address is the TOC address that is wrapped before register allocation:
38887 (unspec [(addr)] UNSPEC_FUSION_ADDIS)
38889 The code is complicated, so we call output_asm_insn directly, and just
38890 return "". */
38892 const char *
38893 emit_fusion_gpr_load (rtx target, rtx mem)
38895 rtx addis_value;
38896 rtx addr;
38897 rtx load_offset;
38898 const char *load_str = NULL;
38899 const char *mode_name = NULL;
38900 machine_mode mode;
38902 if (GET_CODE (mem) == ZERO_EXTEND)
38903 mem = XEXP (mem, 0);
38905 gcc_assert (REG_P (target) && MEM_P (mem));
38907 addr = XEXP (mem, 0);
38908 fusion_split_address (addr, &addis_value, &load_offset);
38910 /* Now emit the load instruction to the same register. */
38911 mode = GET_MODE (mem);
38912 switch (mode)
38914 case E_QImode:
38915 mode_name = "char";
38916 load_str = "lbz";
38917 break;
38919 case E_HImode:
38920 mode_name = "short";
38921 load_str = "lhz";
38922 break;
38924 case E_SImode:
38925 case E_SFmode:
38926 mode_name = (mode == SFmode) ? "float" : "int";
38927 load_str = "lwz";
38928 break;
38930 case E_DImode:
38931 case E_DFmode:
38932 gcc_assert (TARGET_POWERPC64);
38933 mode_name = (mode == DFmode) ? "double" : "long";
38934 load_str = "ld";
38935 break;
38937 default:
38938 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38941 /* Emit the addis instruction. */
38942 emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);
38944 /* Emit the D-form load instruction. */
38945 emit_fusion_load_store (target, target, load_offset, load_str);
38947 return "";
38951 /* Return true if the peephole2 pass can combine an addis instruction with a
38952 memory load or store so that the pair can be fused. This form of fusion
38953 was added to the ISA 3.0 (power9) hardware. */
38955 bool
38956 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38957 rtx addis_value, /* addis value. */
38958 rtx dest, /* destination (memory or register). */
38959 rtx src) /* source (register or memory). */
38961 rtx addr, mem, offset;
38962 machine_mode mode = GET_MODE (src);
38964 /* Validate arguments. */
38965 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38966 return false;
38968 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38969 return false;
38971 /* Ignore extend operations that are part of the load. */
38972 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
38973 src = XEXP (src, 0);
38975 /* Test for memory<-register or register<-memory. */
38976 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
38978 if (!MEM_P (dest))
38979 return false;
38981 mem = dest;
38984 else if (MEM_P (src))
38986 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
38987 return false;
38989 mem = src;
38992 else
38993 return false;
38995 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38996 if (GET_CODE (addr) == PLUS)
38998 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38999 return false;
39001 return satisfies_constraint_I (XEXP (addr, 1));
39004 else if (GET_CODE (addr) == LO_SUM)
39006 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
39007 return false;
39009 offset = XEXP (addr, 1);
39010 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
39011 return small_toc_ref (offset, GET_MODE (offset));
39013 else if (TARGET_ELF && !TARGET_POWERPC64)
39014 return CONSTANT_P (offset);
39017 return false;
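/* Sketch of a power9 fused pair accepted above: unlike power8 fusion, the
   destination does not have to match the addis register, and stores and
   FP/vector registers participate as well (registers are illustrative):

	addis 9,2,sym@toc@ha
	lfd 0,sym@toc@l(9)	# addis register 9 must be dead afterwards  */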
39020 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
39021 load sequence.
39023 The operands are:
39024 operands[0] register set with addis
39025 operands[1] value set via addis
39026 operands[2] target register being loaded
39027 operands[3] D-form memory reference using operands[0].
39029 This is similar to the fusion introduced with power8, except it extends to
39030 both loads and stores and does not require the result register to be the
39031 same as the base register. At the moment, we only do this if the register
39032 set by the addis is dead. */
39034 void
39035 expand_fusion_p9_load (rtx *operands)
39037 rtx tmp_reg = operands[0];
39038 rtx addis_value = operands[1];
39039 rtx target = operands[2];
39040 rtx orig_mem = operands[3];
39041 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
39042 enum rtx_code plus_or_lo_sum;
39043 machine_mode target_mode = GET_MODE (target);
39044 machine_mode extend_mode = target_mode;
39045 machine_mode ptr_mode = Pmode;
39046 enum rtx_code extend = UNKNOWN;
39048 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
39050 extend = GET_CODE (orig_mem);
39051 orig_mem = XEXP (orig_mem, 0);
39052 target_mode = GET_MODE (orig_mem);
39055 gcc_assert (MEM_P (orig_mem));
39057 orig_addr = XEXP (orig_mem, 0);
39058 plus_or_lo_sum = GET_CODE (orig_addr);
39059 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
39061 offset = XEXP (orig_addr, 1);
39062 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
39063 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
39065 if (extend != UNKNOWN)
39066 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
39068 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
39069 UNSPEC_FUSION_P9);
39071 set = gen_rtx_SET (target, new_mem);
39072 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
39073 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
39074 emit_insn (insn);
39076 return;
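/* The RTL emitted above has the shape (a sketch):

	(parallel [(set (reg) (unspec [(mem)] UNSPEC_FUSION_P9))
		   (clobber (reg tmp))])

   The unspec marks the access for the fusion output templates, and the
   clobber records the scratch register used for the addis result.  */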
39079 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
39080 store sequence.
39082 The operands are:
39083 operands[0] register set with addis
39084 operands[1] value set via addis
39085 operands[2] target D-form memory being stored to
39086 operands[3] register being stored
39088 This is similar to the fusion introduced with power8, except it extends to
39089 both loads and stores and does not require the result register to be the
39090 same as the base register. At the moment, we only do this if the register
39091 set by the addis is dead. */
39093 void
39094 expand_fusion_p9_store (rtx *operands)
39096 rtx tmp_reg = operands[0];
39097 rtx addis_value = operands[1];
39098 rtx orig_mem = operands[2];
39099 rtx src = operands[3];
39100 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
39101 enum rtx_code plus_or_lo_sum;
39102 machine_mode target_mode = GET_MODE (orig_mem);
39103 machine_mode ptr_mode = Pmode;
39105 gcc_assert (MEM_P (orig_mem));
39107 orig_addr = XEXP (orig_mem, 0);
39108 plus_or_lo_sum = GET_CODE (orig_addr);
39109 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
39111 offset = XEXP (orig_addr, 1);
39112 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
39113 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
39115 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
39116 UNSPEC_FUSION_P9);
39118 set = gen_rtx_SET (new_mem, new_src);
39119 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
39120 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
39121 emit_insn (insn);
39123 return;
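/* The store expansion mirrors the load one, with the unspec wrapping the
   stored value rather than the memory reference (a sketch):

	(parallel [(set (mem) (unspec [(reg src)] UNSPEC_FUSION_P9))
		   (clobber (reg tmp))])  */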
39126 /* Return a string to fuse an addis instruction with a load using extended
39127 fusion. The address that is used is the logical address that was formed
39128 during peephole2: (lo_sum (high) (low-part))
39130 The code is complicated, so we call output_asm_insn directly, and just
39131 return "". */
39133 const char *
39134 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
39136 machine_mode mode = GET_MODE (reg);
39137 rtx hi;
39138 rtx lo;
39139 rtx addr;
39140 const char *load_string;
39141 int r;
39143 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
39145 mem = XEXP (mem, 0);
39146 mode = GET_MODE (mem);
39149 if (GET_CODE (reg) == SUBREG)
39151 gcc_assert (SUBREG_BYTE (reg) == 0);
39152 reg = SUBREG_REG (reg);
39155 if (!REG_P (reg))
39156 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
39158 r = REGNO (reg);
39159 if (FP_REGNO_P (r))
39161 if (mode == SFmode)
39162 load_string = "lfs";
39163 else if (mode == DFmode || mode == DImode)
39164 load_string = "lfd";
39165 else
39166 gcc_unreachable ();
39168 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
39170 if (mode == SFmode)
39171 load_string = "lxssp";
39172 else if (mode == DFmode || mode == DImode)
39173 load_string = "lxsd";
39174 else
39175 gcc_unreachable ();
39177 else if (INT_REGNO_P (r))
39179 switch (mode)
39181 case E_QImode:
39182 load_string = "lbz";
39183 break;
39184 case E_HImode:
39185 load_string = "lhz";
39186 break;
39187 case E_SImode:
39188 case E_SFmode:
39189 load_string = "lwz";
39190 break;
39191 case E_DImode:
39192 case E_DFmode:
39193 if (!TARGET_POWERPC64)
39194 gcc_unreachable ();
39195 load_string = "ld";
39196 break;
39197 default:
39198 gcc_unreachable ();
39201 else
39202 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
39204 if (!MEM_P (mem))
39205 fatal_insn ("emit_fusion_p9_load not MEM", mem);
39207 addr = XEXP (mem, 0);
39208 fusion_split_address (addr, &hi, &lo);
39210 /* Emit the addis instruction. */
39211 emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));
39213 /* Emit the D-form load instruction. */
39214 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
39216 return "";
39219 /* Return a string to fuse an addis instruction with a store using extended
39220 fusion. The address that is used is the logical address that was formed
39221 during peephole2: (lo_sum (high) (low-part))
39223 The code is complicated, so we call output_asm_insn directly, and just
39224 return "". */
39226 const char *
39227 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
39229 machine_mode mode = GET_MODE (reg);
39230 rtx hi;
39231 rtx lo;
39232 rtx addr;
39233 const char *store_string;
39234 int r;
39236 if (GET_CODE (reg) == SUBREG)
39238 gcc_assert (SUBREG_BYTE (reg) == 0);
39239 reg = SUBREG_REG (reg);
39242 if (!REG_P (reg))
39243 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
39245 r = REGNO (reg);
39246 if (FP_REGNO_P (r))
39248 if (mode == SFmode)
39249 store_string = "stfs";
39250 else if (mode == DFmode)
39251 store_string = "stfd";
39252 else
39253 gcc_unreachable ();
39255 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
39257 if (mode == SFmode)
39258 store_string = "stxssp";
39259 else if (mode == DFmode || mode == DImode)
39260 store_string = "stxsd";
39261 else
39262 gcc_unreachable ();
39264 else if (INT_REGNO_P (r))
39266 switch (mode)
39268 case E_QImode:
39269 store_string = "stb";
39270 break;
39271 case E_HImode:
39272 store_string = "sth";
39273 break;
39274 case E_SImode:
39275 case E_SFmode:
39276 store_string = "stw";
39277 break;
39278 case E_DImode:
39279 case E_DFmode:
39280 if (!TARGET_POWERPC64)
39281 gcc_unreachable ();
39282 store_string = "std";
39283 break;
39284 default:
39285 gcc_unreachable ();
39288 else
39289 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
39291 if (!MEM_P (mem))
39292 fatal_insn ("emit_fusion_p9_store not MEM", mem);
39294 addr = XEXP (mem, 0);
39295 fusion_split_address (addr, &hi, &lo);
39297 /* Emit the addis instruction. */
39298 emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));
39300 /* Emit the D-form store instruction. */
39301 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
39303 return "";
39306 #ifdef RS6000_GLIBC_ATOMIC_FENV
39307 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
39308 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
39309 #endif
39311 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
39313 static void
39314 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
39316 if (!TARGET_HARD_FLOAT)
39318 #ifdef RS6000_GLIBC_ATOMIC_FENV
39319 if (atomic_hold_decl == NULL_TREE)
39321 atomic_hold_decl
39322 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39323 get_identifier ("__atomic_feholdexcept"),
39324 build_function_type_list (void_type_node,
39325 double_ptr_type_node,
39326 NULL_TREE));
39327 TREE_PUBLIC (atomic_hold_decl) = 1;
39328 DECL_EXTERNAL (atomic_hold_decl) = 1;
39331 if (atomic_clear_decl == NULL_TREE)
39333 atomic_clear_decl
39334 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39335 get_identifier ("__atomic_feclearexcept"),
39336 build_function_type_list (void_type_node,
39337 NULL_TREE));
39338 TREE_PUBLIC (atomic_clear_decl) = 1;
39339 DECL_EXTERNAL (atomic_clear_decl) = 1;
39342 tree const_double = build_qualified_type (double_type_node,
39343 TYPE_QUAL_CONST);
39344 tree const_double_ptr = build_pointer_type (const_double);
39345 if (atomic_update_decl == NULL_TREE)
39347 atomic_update_decl
39348 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39349 get_identifier ("__atomic_feupdateenv"),
39350 build_function_type_list (void_type_node,
39351 const_double_ptr,
39352 NULL_TREE));
39353 TREE_PUBLIC (atomic_update_decl) = 1;
39354 DECL_EXTERNAL (atomic_update_decl) = 1;
39357 tree fenv_var = create_tmp_var_raw (double_type_node);
39358 TREE_ADDRESSABLE (fenv_var) = 1;
39359 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
39361 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
39362 *clear = build_call_expr (atomic_clear_decl, 0);
39363 *update = build_call_expr (atomic_update_decl, 1,
39364 fold_convert (const_double_ptr, fenv_addr));
39365 #endif
39366 return;
39369 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
39370 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
39371 tree call_mffs = build_call_expr (mffs, 0);
39373 /* Generates the equivalent of feholdexcept (&fenv_var)
39375 fenv_var = __builtin_mffs ();
39376 double fenv_hold;
39377 *(uint64_t*)&fenv_hold = *(uint64_t*)&fenv_var & 0xffffffff00000007LL;
39378 __builtin_mtfsf (0xff, fenv_hold); */
39380 /* Mask to clear everything except for the rounding modes and non-IEEE
39381 arithmetic flag. */
39382 const unsigned HOST_WIDE_INT hold_exception_mask =
39383 HOST_WIDE_INT_C (0xffffffff00000007);
39385 tree fenv_var = create_tmp_var_raw (double_type_node);
39387 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
39389 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
39390 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39391 build_int_cst (uint64_type_node,
39392 hold_exception_mask));
39394 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39395 fenv_llu_and);
39397 tree hold_mtfsf = build_call_expr (mtfsf, 2,
39398 build_int_cst (unsigned_type_node, 0xff),
39399 fenv_hold_mtfsf);
39401 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
39403 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
39405 double fenv_clear = __builtin_mffs ();
39406 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
39407 __builtin_mtfsf (0xff, fenv_clear); */
39409 /* Mask to clear the entire lower 32 bits holding the FPSCR image, which
39410 includes all of the exception and status bits. */
39411 const unsigned HOST_WIDE_INT clear_exception_mask =
39412 HOST_WIDE_INT_C (0xffffffff00000000);
39414 tree fenv_clear = create_tmp_var_raw (double_type_node);
39416 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
39418 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
39419 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
39420 fenv_clean_llu,
39421 build_int_cst (uint64_type_node,
39422 clear_exception_mask));
39424 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39425 fenv_clear_llu_and);
39427 tree clear_mtfsf = build_call_expr (mtfsf, 2,
39428 build_int_cst (unsigned_type_node, 0xff),
39429 fenv_clear_mtfsf);
39431 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
39433 /* Generates the equivalent of feupdateenv (&fenv_var)
39435 double old_fenv = __builtin_mffs ();
39436 double fenv_update;
39437 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
39438 (*(uint64_t*)&fenv_var & 0x1ff80fff);
39439 __builtin_mtfsf (0xff, fenv_update); */
39441 const unsigned HOST_WIDE_INT update_exception_mask =
39442 HOST_WIDE_INT_C (0xffffffff1fffff00);
39443 const unsigned HOST_WIDE_INT new_exception_mask =
39444 HOST_WIDE_INT_C (0x1ff80fff);
39446 tree old_fenv = create_tmp_var_raw (double_type_node);
39447 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39449 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39450 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39451 build_int_cst (uint64_type_node,
39452 update_exception_mask));
39454 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39455 build_int_cst (uint64_type_node,
39456 new_exception_mask));
39458 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39459 old_llu_and, new_llu_and);
39461 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39462 new_llu_mask);
39464 tree update_mtfsf = build_call_expr (mtfsf, 2,
39465 build_int_cst (unsigned_type_node, 0xff),
39466 fenv_update_mtfsf);
39468 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
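/* Taken together, the three sequences built above implement the documented
   TARGET_ATOMIC_ASSIGN_EXPAND_FENV protocol for an atomic compound
   assignment on a floating-point type (a sketch of the control flow):

	hold:	save the FP environment and disable exception traps
	loop:	evaluate the operation, attempt the compare-and-swap
	clear:	on failure, clear raised exceptions and retry
	update:	on success, restore the environment and raise the
		exceptions recorded by the final iteration  */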
39471 void
39472 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39474 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39476 rtx_tmp0 = gen_reg_rtx (V2DImode);
39477 rtx_tmp1 = gen_reg_rtx (V2DImode);
39479 /* The destination of the vmrgew instruction layout is:
39480 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39481 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39482 vmrgew instruction will be correct. */
39483 if (VECTOR_ELT_ORDER_BIG)
39485 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39486 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39488 else
39490 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39491 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39494 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39495 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39497 if (signed_convert)
39499 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39500 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39502 else
39504 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39505 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39508 if (VECTOR_ELT_ORDER_BIG)
39509 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39510 else
39511 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
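/* Worked example (a sketch): with src1 = {a0, a1} and src2 = {b0, b1},
   the xxpermdi pair builds rtx_tmp0 = {a0, b0} and rtx_tmp1 = {a1, b1}.
   After conversion to single precision, vmrgew interleaves the even
   words, so dst = {a0, a1, b0, b1} in float form.  */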
39514 void
39515 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39516 rtx src2)
39518 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39520 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39521 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39523 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39524 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39526 rtx_tmp2 = gen_reg_rtx (V4SImode);
39527 rtx_tmp3 = gen_reg_rtx (V4SImode);
39529 if (signed_convert)
39531 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39532 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39534 else
39536 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39537 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39540 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
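/* The vsigned2 variant works the same way on V2DF inputs with a V4SI
   result (a sketch; rounding follows xvcvdpsxws/xvcvdpuxws):
   dst = {(int) src1[0], (int) src1[1], (int) src2[0], (int) src2[1]}.  */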
39543 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39545 static bool
39546 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39547 optimization_type opt_type)
39549 switch (op)
39551 case rsqrt_optab:
39552 return (opt_type == OPTIMIZE_FOR_SPEED
39553 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39555 default:
39556 return true;
39560 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39562 static HOST_WIDE_INT
39563 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39565 if (TREE_CODE (exp) == STRING_CST
39566 && (STRICT_ALIGNMENT || !optimize_size))
39567 return MAX (align, BITS_PER_WORD);
39568 return align;
39571 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39573 static HOST_WIDE_INT
39574 rs6000_starting_frame_offset (void)
39576 if (FRAME_GROWS_DOWNWARD)
39577 return 0;
39578 return RS6000_STARTING_FRAME_OFFSET;
39581 struct gcc_target targetm = TARGET_INITIALIZER;
39583 #include "gt-rs6000.h"