/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable; we call to it
   so we can get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
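
/* Illustrative sketch, not part of the original file: how an -mrecip=<xxx>
   option string would be mapped to its mask bits using the table above.
   The real -mrecip parsing happens later in this file during option
   override; this helper is hypothetical and only demonstrates the table's
   intended use.  */
static unsigned int ATTRIBUTE_UNUSED
example_recip_mask_for_option (const char *opt)
{
  for (size_t i = 0; i < ARRAY_SIZE (recip_options); i++)
    if (strcmp (recip_options[i].string, opt) == 0)
      return recip_options[i].mask;
  return RECIP_NONE;	/* Unknown -mrecip option name.  */
}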
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
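
/* Illustrative sketch, not part of the original file: conceptually,
   __builtin_cpu_supports ("name") finds the row for NAME in
   cpu_supports_info and then tests the recorded mask against the
   AT_HWCAP (id 0) or AT_HWCAP2 (id 1) word libc stores in the TCB.
   This hypothetical helper shows only the table-lookup half.  */
static int ATTRIBUTE_UNUSED
example_cpu_supports_row (const char *name)
{
  for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
    if (strcmp (cpu_supports_info[i].hwcap, name) == 0)
      return (int) i;
  return -1;	/* Not a known HWCAP/HWCAP2 name.  */
}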
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,		/* default clone.  */
  CLONE_ISA_2_05,		/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,		/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,		/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,		/* ISA 3.00 (power9).  */
  CLONE_MAX
};
/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};
/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)


/* Register classes we care about in secondary reload or legitimate address
   checking.  We only need to worry about GPR, FPR, and Altivec registers
   here, along with an ANY field that is the OR of the 3 register classes.  */
enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};
/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
/* Per-mode addressing information: the reload insn codes for the mode, plus
   masks of the valid addressing forms for each register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
  /* INSNs for fusing addi with loads or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
  /* INSNs for fusing addis with loads or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */
int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;

		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
  const int sfdf_convert;	/* cost of SF->DF conversion.  */
};

const struct processor_costs *rs6000_cost;
/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),    /* mulsi_const */
  COSTS_N_INSNS (6/2),    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),   /* divsi */
  COSTS_N_INSNS (70/2),   /* divdi */
  COSTS_N_INSNS (10/2),   /* fp */
  COSTS_N_INSNS (10/2),   /* dmul */
  COSTS_N_INSNS (74/2),   /* sdiv */
  COSTS_N_INSNS (74/2),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  6,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};
/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),    /* mulsi */
  COSTS_N_INSNS (16),    /* mulsi_const */
  COSTS_N_INSNS (16),    /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  16,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X
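
/* Illustrative expansion, using a hypothetical entry: given a line such as
     RS6000_BUILTIN_2 (MY_FOO, "__builtin_foo", MASK, ATTR, CODE_FOR_foo)
   in rs6000-builtin.def, the #defines above turn it into the initializer
     { "__builtin_foo", CODE_FOR_foo, MASK, ATTR },
   so rs6000_builtin_info ends up with one rs6000_builtin_info_type record
   per builtin, in definition order.  */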
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);


static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void htm_init_builtins (void);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  machine_mode);
static bool rs6000_cannot_change_mode_class (machine_mode,
					     machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (machine_mode,
						   machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (machine_mode,
					     machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
 "mq", "lr", "ctr", "ap",
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
 "ca",
 /* AltiVec registers.  */
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
 "vrsave", "vscr",
 /* Soft frame pointer.  */
 "sfp",
 /* HTM SPR registers.  */
 "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
  "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
 "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
  "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
 "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
 "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
   "mq",   "lr",  "ctr",   "ap",
 "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
 /* AltiVec registers.  */
  "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
  "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
 "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
 "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
 "vrsave", "vscr",
 /* Soft frame pointer.  */
 "sfp",
 /* HTM SPR registers.  */
 "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
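
/* Illustrative sanity checks, not in the original source: %v0 maps to the
   most significant bit of the VRSAVE mask and %v31 to the least, since each
   successive AltiVec register shifts the bit one place to the right.  */
STATIC_ASSERT (ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) == 0x80000000);
STATIC_ASSERT (ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) == 0x00000001);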
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1703 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1704 rs6000_builtin_vectorization_cost
1705 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1706 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1707 rs6000_preferred_simd_mode
1708 #undef TARGET_VECTORIZE_INIT_COST
1709 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1710 #undef TARGET_VECTORIZE_ADD_STMT_COST
1711 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1712 #undef TARGET_VECTORIZE_FINISH_COST
1713 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1714 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1715 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1717 #undef TARGET_INIT_BUILTINS
1718 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1719 #undef TARGET_BUILTIN_DECL
1720 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1722 #undef TARGET_FOLD_BUILTIN
1723 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1724 #undef TARGET_GIMPLE_FOLD_BUILTIN
1725 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1727 #undef TARGET_EXPAND_BUILTIN
1728 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1730 #undef TARGET_MANGLE_TYPE
1731 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1733 #undef TARGET_INIT_LIBFUNCS
1734 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1736 #if TARGET_MACHO
1737 #undef TARGET_BINDS_LOCAL_P
1738 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1739 #endif
1741 #undef TARGET_MS_BITFIELD_LAYOUT_P
1742 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1744 #undef TARGET_ASM_OUTPUT_MI_THUNK
1745 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1747 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1748 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1750 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1751 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1753 #undef TARGET_REGISTER_MOVE_COST
1754 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1755 #undef TARGET_MEMORY_MOVE_COST
1756 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1757 #undef TARGET_CANNOT_COPY_INSN_P
1758 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1759 #undef TARGET_RTX_COSTS
1760 #define TARGET_RTX_COSTS rs6000_rtx_costs
1761 #undef TARGET_ADDRESS_COST
1762 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1764 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1765 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1767 #undef TARGET_PROMOTE_FUNCTION_MODE
1768 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1770 #undef TARGET_RETURN_IN_MEMORY
1771 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1773 #undef TARGET_RETURN_IN_MSB
1774 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1776 #undef TARGET_SETUP_INCOMING_VARARGS
1777 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1779 /* Always strict argument naming on rs6000. */
1780 #undef TARGET_STRICT_ARGUMENT_NAMING
1781 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1782 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1783 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1784 #undef TARGET_SPLIT_COMPLEX_ARG
1785 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1786 #undef TARGET_MUST_PASS_IN_STACK
1787 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1788 #undef TARGET_PASS_BY_REFERENCE
1789 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1790 #undef TARGET_ARG_PARTIAL_BYTES
1791 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1792 #undef TARGET_FUNCTION_ARG_ADVANCE
1793 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1794 #undef TARGET_FUNCTION_ARG
1795 #define TARGET_FUNCTION_ARG rs6000_function_arg
1796 #undef TARGET_FUNCTION_ARG_BOUNDARY
1797 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1799 #undef TARGET_BUILD_BUILTIN_VA_LIST
1800 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1802 #undef TARGET_EXPAND_BUILTIN_VA_START
1803 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1805 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1806 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1808 #undef TARGET_EH_RETURN_FILTER_MODE
1809 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1811 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1812 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1814 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1815 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1817 #undef TARGET_FLOATN_MODE
1818 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1820 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1821 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1823 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1824 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1826 #undef TARGET_MD_ASM_ADJUST
1827 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1829 #undef TARGET_OPTION_OVERRIDE
1830 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1832 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1833 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1834 rs6000_builtin_vectorized_function
1836 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1837 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1838 rs6000_builtin_md_vectorized_function
1840 #undef TARGET_STACK_PROTECT_GUARD
1841 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1843 #if !TARGET_MACHO
1844 #undef TARGET_STACK_PROTECT_FAIL
1845 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1846 #endif
1848 #ifdef HAVE_AS_TLS
1849 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1850 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1851 #endif
1853 /* Use a 32-bit anchor range. This leads to sequences like:
1855 addis tmp,anchor,high
1856 add dest,tmp,low
1858 where tmp itself acts as an anchor, and can be shared between
1859 accesses to the same 64k page. */
1860 #undef TARGET_MIN_ANCHOR_OFFSET
1861 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1862 #undef TARGET_MAX_ANCHOR_OFFSET
1863 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1864 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1865 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1866 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1867 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1869 #undef TARGET_BUILTIN_RECIPROCAL
1870 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1872 #undef TARGET_SECONDARY_RELOAD
1873 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1875 #undef TARGET_LEGITIMATE_ADDRESS_P
1876 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1878 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1879 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1881 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1882 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1884 #undef TARGET_CAN_ELIMINATE
1885 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1887 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1888 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1890 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1891 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1893 #undef TARGET_TRAMPOLINE_INIT
1894 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1896 #undef TARGET_FUNCTION_VALUE
1897 #define TARGET_FUNCTION_VALUE rs6000_function_value
1899 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1900 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1902 #undef TARGET_OPTION_SAVE
1903 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1905 #undef TARGET_OPTION_RESTORE
1906 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1908 #undef TARGET_OPTION_PRINT
1909 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1911 #undef TARGET_CAN_INLINE_P
1912 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1914 #undef TARGET_SET_CURRENT_FUNCTION
1915 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1917 #undef TARGET_LEGITIMATE_CONSTANT_P
1918 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1920 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1921 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1923 #undef TARGET_CAN_USE_DOLOOP_P
1924 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1926 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1927 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1929 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1930 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1931 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1932 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1933 #undef TARGET_UNWIND_WORD_MODE
1934 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1936 #undef TARGET_OFFLOAD_OPTIONS
1937 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1939 #undef TARGET_C_MODE_FOR_SUFFIX
1940 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1942 #undef TARGET_INVALID_BINARY_OP
1943 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1945 #undef TARGET_OPTAB_SUPPORTED_P
1946 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1948 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1949 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1951 #undef TARGET_COMPARE_VERSION_PRIORITY
1952 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1954 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1955 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1956 rs6000_generate_version_dispatcher_body
1958 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1959 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1960 rs6000_get_function_versions_dispatcher
1962 #undef TARGET_OPTION_FUNCTION_VERSIONS
1963 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1967 /* Processor table. */
1968 struct rs6000_ptt
1969 {
1970 const char *const name; /* Canonical processor name. */
1971 const enum processor_type processor; /* Processor type enum value. */
1972 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1973 };
1975 static struct rs6000_ptt const processor_target_table[] =
1976 {
1977 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1978 #include "rs6000-cpus.def"
1979 #undef RS6000_CPU
1980 };
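/* A sketch of how the X-macro above expands (the entry shown is for
   illustration only): each line of rs6000-cpus.def, such as

     RS6000_CPU ("power9", PROCESSOR_POWER9, ...)

   becomes the initializer

     { "power9", PROCESSOR_POWER9, ... },

   so processor_target_table stays in lockstep with the .def file. */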
1982 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1983 name is invalid. */
1985 static int
1986 rs6000_cpu_name_lookup (const char *name)
1987 {
1988 size_t i;
1990 if (name != NULL)
1991 {
1992 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1993 if (! strcmp (name, processor_target_table[i].name))
1994 return (int)i;
1995 }
1997 return -1;
1998 }
2001 /* Return number of consecutive hard regs needed starting at reg REGNO
2002 to hold something of mode MODE.
2003 This is ordinarily the length in words of a value of mode MODE
2004 but can be less for certain modes in special long registers.
2006 POWER and PowerPC GPRs hold 32 bits worth;
2007 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2009 static int
2010 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2012 unsigned HOST_WIDE_INT reg_size;
2014 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2015 128-bit floating point that can go in vector registers, which has VSX
2016 memory addressing. */
2017 if (FP_REGNO_P (regno))
2018 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2019 ? UNITS_PER_VSX_WORD
2020 : UNITS_PER_FP_WORD);
2022 else if (ALTIVEC_REGNO_P (regno))
2023 reg_size = UNITS_PER_ALTIVEC_WORD;
2025 else
2026 reg_size = UNITS_PER_WORD;
2028 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
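/* A worked example of the ceiling division above (illustrative, not from
   the original source): with 32-bit GPRs (reg_size == 4), DFmode needs
   (8 + 4 - 1) / 4 == 2 registers, while V4SImode in an AltiVec register
   (reg_size == 16) needs (16 + 16 - 1) / 16 == 1 register. */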
2031 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2032 MODE. */
2033 static int
2034 rs6000_hard_regno_mode_ok (int regno, machine_mode mode)
2036 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2038 if (COMPLEX_MODE_P (mode))
2039 mode = GET_MODE_INNER (mode);
2041 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2042 register pairs, and we use PTImode where we need to deal with quad
2043 word memory operations. Don't allow quad words in the argument or frame
2044 pointer registers, just registers 0..31. */
2045 if (mode == PTImode)
2046 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2047 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2048 && ((regno & 1) == 0));
2050 /* VSX registers that overlap the FPR registers are larger than for non-VSX
2051 implementations. Don't allow an item to be split between a FP register
2052 and an Altivec register. Allow TImode in all VSX registers if the user
2053 asked for it. */
2054 if (TARGET_VSX && VSX_REGNO_P (regno)
2055 && (VECTOR_MEM_VSX_P (mode)
2056 || FLOAT128_VECTOR_P (mode)
2057 || reg_addr[mode].scalar_in_vmx_p
2058 || mode == TImode
2059 || (TARGET_VADDUQM && mode == V1TImode)))
2061 if (FP_REGNO_P (regno))
2062 return FP_REGNO_P (last_regno);
2064 if (ALTIVEC_REGNO_P (regno))
2066 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2067 return 0;
2069 return ALTIVEC_REGNO_P (last_regno);
2073 /* The GPRs can hold any mode, but values bigger than one register
2074 cannot go past R31. */
2075 if (INT_REGNO_P (regno))
2076 return INT_REGNO_P (last_regno);
2078 /* The float registers (except for VSX vector modes) can only hold floating
2079 modes and DImode. */
2080 if (FP_REGNO_P (regno))
2082 if (FLOAT128_VECTOR_P (mode))
2083 return false;
2085 if (SCALAR_FLOAT_MODE_P (mode)
2086 && (mode != TDmode || (regno % 2) == 0)
2087 && FP_REGNO_P (last_regno))
2088 return 1;
2090 if (GET_MODE_CLASS (mode) == MODE_INT)
2092 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2093 return 1;
2095 if (TARGET_P8_VECTOR && (mode == SImode))
2096 return 1;
2098 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2099 return 1;
2102 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
2103 && PAIRED_VECTOR_MODE (mode))
2104 return 1;
2106 return 0;
2109 /* The CR register can only hold CC modes. */
2110 if (CR_REGNO_P (regno))
2111 return GET_MODE_CLASS (mode) == MODE_CC;
2113 if (CA_REGNO_P (regno))
2114 return mode == Pmode || mode == SImode;
2116 /* AltiVec modes only in AltiVec registers. */
2117 if (ALTIVEC_REGNO_P (regno))
2118 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2119 || mode == V1TImode);
2121 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2122 registers, and it must be able to fit within the register set. */
2124 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
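/* An illustrative consequence of the checks above: TDmode (128-bit
   decimal float) must start on an even FPR, so this function returns 0
   for an odd FPR such as regno 33 (f1) but nonzero for regno 32 (f0),
   provided the value also ends within the FPR file. */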
2127 /* Print interesting facts about registers. */
2128 static void
2129 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2131 int r, m;
2133 for (r = first_regno; r <= last_regno; ++r)
2135 const char *comma = "";
2136 int len;
2138 if (first_regno == last_regno)
2139 fprintf (stderr, "%s:\t", reg_name);
2140 else
2141 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2143 len = 8;
2144 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2145 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2147 if (len > 70)
2149 fprintf (stderr, ",\n\t");
2150 len = 8;
2151 comma = "";
2154 if (rs6000_hard_regno_nregs[m][r] > 1)
2155 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2156 rs6000_hard_regno_nregs[m][r]);
2157 else
2158 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2160 comma = ", ";
2163 if (call_used_regs[r])
2165 if (len > 70)
2167 fprintf (stderr, ",\n\t");
2168 len = 8;
2169 comma = "";
2172 len += fprintf (stderr, "%s%s", comma, "call-used");
2173 comma = ", ";
2176 if (fixed_regs[r])
2178 if (len > 70)
2180 fprintf (stderr, ",\n\t");
2181 len = 8;
2182 comma = "";
2185 len += fprintf (stderr, "%s%s", comma, "fixed");
2186 comma = ", ";
2189 if (len > 70)
2191 fprintf (stderr, ",\n\t");
2192 comma = "";
2195 len += fprintf (stderr, "%sreg-class = %s", comma,
2196 reg_class_names[(int)rs6000_regno_regclass[r]]);
2197 comma = ", ";
2199 if (len > 70)
2201 fprintf (stderr, ",\n\t");
2202 comma = "";
2205 fprintf (stderr, "%sregno = %d\n", comma, r);
2209 static const char *
2210 rs6000_debug_vector_unit (enum rs6000_vector v)
2212 const char *ret;
2214 switch (v)
2216 case VECTOR_NONE: ret = "none"; break;
2217 case VECTOR_ALTIVEC: ret = "altivec"; break;
2218 case VECTOR_VSX: ret = "vsx"; break;
2219 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2220 case VECTOR_PAIRED: ret = "paired"; break;
2221 case VECTOR_OTHER: ret = "other"; break;
2222 default: ret = "unknown"; break;
2225 return ret;
2228 /* Inner function printing just the address mask for a particular reload
2229 register class. */
2230 DEBUG_FUNCTION char *
2231 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2233 static char ret[8];
2234 char *p = ret;
2236 if ((mask & RELOAD_REG_VALID) != 0)
2237 *p++ = 'v';
2238 else if (keep_spaces)
2239 *p++ = ' ';
2241 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2242 *p++ = 'm';
2243 else if (keep_spaces)
2244 *p++ = ' ';
2246 if ((mask & RELOAD_REG_INDEXED) != 0)
2247 *p++ = 'i';
2248 else if (keep_spaces)
2249 *p++ = ' ';
2251 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2252 *p++ = 'O';
2253 else if ((mask & RELOAD_REG_OFFSET) != 0)
2254 *p++ = 'o';
2255 else if (keep_spaces)
2256 *p++ = ' ';
2258 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2259 *p++ = '+';
2260 else if (keep_spaces)
2261 *p++ = ' ';
2263 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2264 *p++ = '+';
2265 else if (keep_spaces)
2266 *p++ = ' ';
2268 if ((mask & RELOAD_REG_AND_M16) != 0)
2269 *p++ = '&';
2270 else if (keep_spaces)
2271 *p++ = ' ';
2273 *p = '\0';
2275 return ret;
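/* Illustration (not output copied from a real run): a mask with
   RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET and
   keep_spaces == true comes back as "v io   ", one column per flag in
   the fixed order v m i o + + &. */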
2278 /* Print the address masks in a human-readable fashion. */
2279 DEBUG_FUNCTION void
2280 rs6000_debug_print_mode (ssize_t m)
2282 ssize_t rc;
2283 int spaces = 0;
2284 bool fuse_extra_p;
2286 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2287 for (rc = 0; rc < N_RELOAD_REG; rc++)
2288 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2289 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2291 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2292 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2293 fprintf (stderr, " Reload=%c%c",
2294 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2295 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2296 else
2297 spaces += sizeof (" Reload=sl") - 1;
2299 if (reg_addr[m].scalar_in_vmx_p)
2301 fprintf (stderr, "%*s Upper=y", spaces, "");
2302 spaces = 0;
2304 else
2305 spaces += sizeof (" Upper=y") - 1;
2307 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2308 || reg_addr[m].fused_toc);
2309 if (!fuse_extra_p)
2311 for (rc = 0; rc < N_RELOAD_REG; rc++)
2313 if (rc != RELOAD_REG_ANY)
2315 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2317 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2318 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2319 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2321 fuse_extra_p = true;
2322 break;
2328 if (fuse_extra_p)
2330 fprintf (stderr, "%*s Fuse:", spaces, "");
2331 spaces = 0;
2333 for (rc = 0; rc < N_RELOAD_REG; rc++)
2335 if (rc != RELOAD_REG_ANY)
2337 char load, store;
2339 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2340 load = 'l';
2341 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2342 load = 'L';
2343 else
2344 load = '-';
2346 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2347 store = 's';
2348 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2349 store = 'S';
2350 else
2351 store = '-';
2353 if (load == '-' && store == '-')
2354 spaces += 5;
2355 else
2357 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2358 reload_reg_map[rc].name[0], load, store);
2359 spaces = 0;
2364 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2366 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2367 spaces = 0;
2369 else
2370 spaces += sizeof (" P8gpr") - 1;
2372 if (reg_addr[m].fused_toc)
2374 fprintf (stderr, "%*sToc", (spaces + 1), "");
2375 spaces = 0;
2377 else
2378 spaces += sizeof (" Toc") - 1;
2380 else
2381 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2383 if (rs6000_vector_unit[m] != VECTOR_NONE
2384 || rs6000_vector_mem[m] != VECTOR_NONE)
2386 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2387 spaces, "",
2388 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2389 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2392 fputs ("\n", stderr);
2395 #define DEBUG_FMT_ID "%-32s= "
2396 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2397 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2398 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
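/* Example of the format macros above (illustrative): DEBUG_FMT_D expands
   to "%-32s= %d\n", so fprintf (stderr, DEBUG_FMT_D, "tls_size", 32)
   prints the name left-justified in a 32-character field:

     tls_size                        = 32
*/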
2400 /* Print various interesting information with -mdebug=reg. */
2401 static void
2402 rs6000_debug_reg_global (void)
2404 static const char *const tf[2] = { "false", "true" };
2405 const char *nl = (const char *)0;
2406 int m;
2407 size_t m1, m2, v;
2408 char costly_num[20];
2409 char nop_num[20];
2410 char flags_buffer[40];
2411 const char *costly_str;
2412 const char *nop_str;
2413 const char *trace_str;
2414 const char *abi_str;
2415 const char *cmodel_str;
2416 struct cl_target_option cl_opts;
2418 /* Modes we want tieable information on. */
2419 static const machine_mode print_tieable_modes[] = {
2420 QImode,
2421 HImode,
2422 SImode,
2423 DImode,
2424 TImode,
2425 PTImode,
2426 SFmode,
2427 DFmode,
2428 TFmode,
2429 IFmode,
2430 KFmode,
2431 SDmode,
2432 DDmode,
2433 TDmode,
2434 V2SImode,
2435 V16QImode,
2436 V8HImode,
2437 V4SImode,
2438 V2DImode,
2439 V1TImode,
2440 V32QImode,
2441 V16HImode,
2442 V8SImode,
2443 V4DImode,
2444 V2TImode,
2445 V2SFmode,
2446 V4SFmode,
2447 V2DFmode,
2448 V8SFmode,
2449 V4DFmode,
2450 CCmode,
2451 CCUNSmode,
2452 CCEQmode,
2453 };
2455 /* Virtual regs we are interested in. */
2456 const static struct {
2457 int regno; /* register number. */
2458 const char *name; /* register name. */
2459 } virtual_regs[] = {
2460 { STACK_POINTER_REGNUM, "stack pointer:" },
2461 { TOC_REGNUM, "toc: " },
2462 { STATIC_CHAIN_REGNUM, "static chain: " },
2463 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2464 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2465 { ARG_POINTER_REGNUM, "arg pointer: " },
2466 { FRAME_POINTER_REGNUM, "frame pointer:" },
2467 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2468 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2469 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2470 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2471 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2472 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2473 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2474 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2475 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2476 };
2478 fputs ("\nHard register information:\n", stderr);
2479 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2480 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2481 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2482 LAST_ALTIVEC_REGNO,
2483 "vs");
2484 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2485 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2486 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2487 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2488 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2489 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2491 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2492 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2493 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2495 fprintf (stderr,
2496 "\n"
2497 "d reg_class = %s\n"
2498 "f reg_class = %s\n"
2499 "v reg_class = %s\n"
2500 "wa reg_class = %s\n"
2501 "wb reg_class = %s\n"
2502 "wd reg_class = %s\n"
2503 "we reg_class = %s\n"
2504 "wf reg_class = %s\n"
2505 "wg reg_class = %s\n"
2506 "wh reg_class = %s\n"
2507 "wi reg_class = %s\n"
2508 "wj reg_class = %s\n"
2509 "wk reg_class = %s\n"
2510 "wl reg_class = %s\n"
2511 "wm reg_class = %s\n"
2512 "wo reg_class = %s\n"
2513 "wp reg_class = %s\n"
2514 "wq reg_class = %s\n"
2515 "wr reg_class = %s\n"
2516 "ws reg_class = %s\n"
2517 "wt reg_class = %s\n"
2518 "wu reg_class = %s\n"
2519 "wv reg_class = %s\n"
2520 "ww reg_class = %s\n"
2521 "wx reg_class = %s\n"
2522 "wy reg_class = %s\n"
2523 "wz reg_class = %s\n"
2524 "wA reg_class = %s\n"
2525 "wH reg_class = %s\n"
2526 "wI reg_class = %s\n"
2527 "wJ reg_class = %s\n"
2528 "wK reg_class = %s\n"
2529 "\n",
2530 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2531 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2532 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2533 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2534 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2535 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2536 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2537 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2538 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2539 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2540 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2541 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2542 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2543 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2544 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2560 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2561 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2563 nl = "\n";
2564 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2565 rs6000_debug_print_mode (m);
2567 fputs ("\n", stderr);
2569 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2571 machine_mode mode1 = print_tieable_modes[m1];
2572 bool first_time = true;
2574 nl = (const char *)0;
2575 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2577 machine_mode mode2 = print_tieable_modes[m2];
2578 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
2580 if (first_time)
2582 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2583 nl = "\n";
2584 first_time = false;
2587 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2591 if (!first_time)
2592 fputs ("\n", stderr);
2595 if (nl)
2596 fputs (nl, stderr);
2598 if (rs6000_recip_control)
2600 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2602 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2603 if (rs6000_recip_bits[m])
2605 fprintf (stderr,
2606 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2607 GET_MODE_NAME (m),
2608 (RS6000_RECIP_AUTO_RE_P (m)
2609 ? "auto"
2610 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2611 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2612 ? "auto"
2613 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2616 fputs ("\n", stderr);
2619 if (rs6000_cpu_index >= 0)
2621 const char *name = processor_target_table[rs6000_cpu_index].name;
2622 HOST_WIDE_INT flags
2623 = processor_target_table[rs6000_cpu_index].target_enable;
2625 sprintf (flags_buffer, "-mcpu=%s flags", name);
2626 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2628 else
2629 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2631 if (rs6000_tune_index >= 0)
2633 const char *name = processor_target_table[rs6000_tune_index].name;
2634 HOST_WIDE_INT flags
2635 = processor_target_table[rs6000_tune_index].target_enable;
2637 sprintf (flags_buffer, "-mtune=%s flags", name);
2638 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2640 else
2641 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2643 cl_target_option_save (&cl_opts, &global_options);
2644 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2645 rs6000_isa_flags);
2647 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2648 rs6000_isa_flags_explicit);
2650 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2651 rs6000_builtin_mask);
2653 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2655 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2656 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2658 switch (rs6000_sched_costly_dep)
2660 case max_dep_latency:
2661 costly_str = "max_dep_latency";
2662 break;
2664 case no_dep_costly:
2665 costly_str = "no_dep_costly";
2666 break;
2668 case all_deps_costly:
2669 costly_str = "all_deps_costly";
2670 break;
2672 case true_store_to_load_dep_costly:
2673 costly_str = "true_store_to_load_dep_costly";
2674 break;
2676 case store_to_load_dep_costly:
2677 costly_str = "store_to_load_dep_costly";
2678 break;
2680 default:
2681 costly_str = costly_num;
2682 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2683 break;
2686 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2688 switch (rs6000_sched_insert_nops)
2690 case sched_finish_regroup_exact:
2691 nop_str = "sched_finish_regroup_exact";
2692 break;
2694 case sched_finish_pad_groups:
2695 nop_str = "sched_finish_pad_groups";
2696 break;
2698 case sched_finish_none:
2699 nop_str = "sched_finish_none";
2700 break;
2702 default:
2703 nop_str = nop_num;
2704 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2705 break;
2708 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2710 switch (rs6000_sdata)
2712 default:
2713 case SDATA_NONE:
2714 break;
2716 case SDATA_DATA:
2717 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2718 break;
2720 case SDATA_SYSV:
2721 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2722 break;
2724 case SDATA_EABI:
2725 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2726 break;
2730 switch (rs6000_traceback)
2732 case traceback_default: trace_str = "default"; break;
2733 case traceback_none: trace_str = "none"; break;
2734 case traceback_part: trace_str = "part"; break;
2735 case traceback_full: trace_str = "full"; break;
2736 default: trace_str = "unknown"; break;
2739 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2741 switch (rs6000_current_cmodel)
2743 case CMODEL_SMALL: cmodel_str = "small"; break;
2744 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2745 case CMODEL_LARGE: cmodel_str = "large"; break;
2746 default: cmodel_str = "unknown"; break;
2749 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2751 switch (rs6000_current_abi)
2753 case ABI_NONE: abi_str = "none"; break;
2754 case ABI_AIX: abi_str = "aix"; break;
2755 case ABI_ELFv2: abi_str = "ELFv2"; break;
2756 case ABI_V4: abi_str = "V4"; break;
2757 case ABI_DARWIN: abi_str = "darwin"; break;
2758 default: abi_str = "unknown"; break;
2761 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2763 if (rs6000_altivec_abi)
2764 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2766 if (rs6000_darwin64_abi)
2767 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2769 fprintf (stderr, DEBUG_FMT_S, "single_float",
2770 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2772 fprintf (stderr, DEBUG_FMT_S, "double_float",
2773 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2775 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2776 (TARGET_SOFT_FLOAT ? "true" : "false"));
2778 if (TARGET_LINK_STACK)
2779 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2781 if (TARGET_P8_FUSION)
2783 char options[80];
2785 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2786 if (TARGET_TOC_FUSION)
2787 strcat (options, ", toc");
2789 if (TARGET_P8_FUSION_SIGN)
2790 strcat (options, ", sign");
2792 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2795 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2796 TARGET_SECURE_PLT ? "secure" : "bss");
2797 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2798 aix_struct_return ? "aix" : "sysv");
2799 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2800 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2801 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2802 tf[!!rs6000_align_branch_targets]);
2803 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2804 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2805 rs6000_long_double_type_size);
2806 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2807 (int)rs6000_sched_restricted_insns_priority);
2808 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2809 (int)END_BUILTINS);
2810 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2811 (int)RS6000_BUILTIN_COUNT);
2813 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2814 (int)TARGET_FLOAT128_ENABLE_TYPE);
2816 if (TARGET_VSX)
2817 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2818 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2820 if (TARGET_DIRECT_MOVE_128)
2821 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2822 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2826 /* Update the addr mask bits in reg_addr to help secondary reload and the
2827 legitimate address support figure out the appropriate addressing to
2828 use. */
2830 static void
2831 rs6000_setup_reg_addr_masks (void)
2833 ssize_t rc, reg, m, nregs;
2834 addr_mask_type any_addr_mask, addr_mask;
2836 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2838 machine_mode m2 = (machine_mode) m;
2839 bool complex_p = false;
2840 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2841 size_t msize;
2843 if (COMPLEX_MODE_P (m2))
2845 complex_p = true;
2846 m2 = GET_MODE_INNER (m2);
2849 msize = GET_MODE_SIZE (m2);
2851 /* SDmode is special in that we want to access it only via REG+REG
2852 addressing on power7 and above, since we want to use the LFIWZX and
2853 STFIWZX instructions to load it. */
2854 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2856 any_addr_mask = 0;
2857 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2859 addr_mask = 0;
2860 reg = reload_reg_map[rc].reg;
2862 /* Can mode values go in the GPR/FPR/Altivec registers? */
2863 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2865 bool small_int_vsx_p = (small_int_p
2866 && (rc == RELOAD_REG_FPR
2867 || rc == RELOAD_REG_VMX));
2869 nregs = rs6000_hard_regno_nregs[m][reg];
2870 addr_mask |= RELOAD_REG_VALID;
2872 /* Indicate if the mode takes more than 1 physical register. If
2873 it takes a single register, indicate it can do REG+REG
2874 addressing. Small integers in VSX registers can only do
2875 REG+REG addressing. */
2876 if (small_int_vsx_p)
2877 addr_mask |= RELOAD_REG_INDEXED;
2878 else if (nregs > 1 || m == BLKmode || complex_p)
2879 addr_mask |= RELOAD_REG_MULTIPLE;
2880 else
2881 addr_mask |= RELOAD_REG_INDEXED;
2883 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2884 addressing. If we allow scalars into Altivec registers,
2885 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2887 if (TARGET_UPDATE
2888 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2889 && msize <= 8
2890 && !VECTOR_MODE_P (m2)
2891 && !FLOAT128_VECTOR_P (m2)
2892 && !complex_p
2893 && !small_int_vsx_p)
2895 addr_mask |= RELOAD_REG_PRE_INCDEC;
2897 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2898 we don't allow PRE_MODIFY for some multi-register
2899 operations. */
2900 switch (m)
2902 default:
2903 addr_mask |= RELOAD_REG_PRE_MODIFY;
2904 break;
2906 case E_DImode:
2907 if (TARGET_POWERPC64)
2908 addr_mask |= RELOAD_REG_PRE_MODIFY;
2909 break;
2911 case E_DFmode:
2912 case E_DDmode:
2913 if (TARGET_DF_INSN)
2914 addr_mask |= RELOAD_REG_PRE_MODIFY;
2915 break;
2920 /* GPR and FPR registers can do REG+OFFSET addressing, except
2921 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2922 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2923 if ((addr_mask != 0) && !indexed_only_p
2924 && msize <= 8
2925 && (rc == RELOAD_REG_GPR
2926 || ((msize == 8 || m2 == SFmode)
2927 && (rc == RELOAD_REG_FPR
2928 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2929 addr_mask |= RELOAD_REG_OFFSET;
2931 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2932 instructions are enabled. The offset for 128-bit VSX registers is
2933 only 12 bits. While GPRs can handle the full offset range, VSX
2934 registers can only handle the restricted range. */
2935 else if ((addr_mask != 0) && !indexed_only_p
2936 && msize == 16 && TARGET_P9_VECTOR
2937 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2938 || (m2 == TImode && TARGET_VSX)))
2940 addr_mask |= RELOAD_REG_OFFSET;
2941 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2942 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2945 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2946 addressing on 128-bit types. */
2947 if (rc == RELOAD_REG_VMX && msize == 16
2948 && (addr_mask & RELOAD_REG_VALID) != 0)
2949 addr_mask |= RELOAD_REG_AND_M16;
2951 reg_addr[m].addr_mask[rc] = addr_mask;
2952 any_addr_mask |= addr_mask;
2955 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
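/* A hedged illustration of the result (assuming a power8-style
   configuration with TARGET_UPDATE and TARGET_DF_INSN): DFmode in the
   FPR class typically ends up with RELOAD_REG_VALID, RELOAD_REG_INDEXED,
   RELOAD_REG_OFFSET, RELOAD_REG_PRE_INCDEC and RELOAD_REG_PRE_MODIFY
   set, which rs6000_debug_addr_mask renders as "v io++ ". */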
2960 /* Initialize the various global tables that are based on register size. */
2961 static void
2962 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2964 ssize_t r, m, c;
2965 int align64;
2966 int align32;
2968 /* Precalculate REGNO_REG_CLASS. */
2969 rs6000_regno_regclass[0] = GENERAL_REGS;
2970 for (r = 1; r < 32; ++r)
2971 rs6000_regno_regclass[r] = BASE_REGS;
2973 for (r = 32; r < 64; ++r)
2974 rs6000_regno_regclass[r] = FLOAT_REGS;
2976 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2977 rs6000_regno_regclass[r] = NO_REGS;
2979 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2980 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2982 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2983 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2984 rs6000_regno_regclass[r] = CR_REGS;
2986 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2987 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2988 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
2989 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2990 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2991 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
2992 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
2993 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
2994 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2995 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2997 /* Precalculate the mapping from register class to the simpler reload
2998 register class. We don't need all the register classes that are
2999 combinations of different classes, just the simple ones that have constraint letters. */
3000 for (c = 0; c < N_REG_CLASSES; c++)
3001 reg_class_to_reg_type[c] = NO_REG_TYPE;
3003 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3004 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3005 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3006 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3007 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3008 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3009 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3010 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3011 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3012 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3014 if (TARGET_VSX)
3016 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3017 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3019 else
3021 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3022 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3025 /* Precalculate the valid memory formats as well as the vector information;
3026 this must be set up before the rs6000_hard_regno_nregs_internal calls
3027 below. */
3028 gcc_assert ((int)VECTOR_NONE == 0);
3029 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3030 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3032 gcc_assert ((int)CODE_FOR_nothing == 0);
3033 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3035 gcc_assert ((int)NO_REGS == 0);
3036 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3038 /* The VSX hardware allows native alignment for vectors; control whether the compiler
3039 believes it can use native alignment or must still use 128-bit alignment. */
3040 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3042 align64 = 64;
3043 align32 = 32;
3045 else
3047 align64 = 128;
3048 align32 = 128;
3051 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3052 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3053 if (TARGET_FLOAT128_TYPE)
3055 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3056 rs6000_vector_align[KFmode] = 128;
3058 if (FLOAT128_IEEE_P (TFmode))
3060 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3061 rs6000_vector_align[TFmode] = 128;
3065 /* V2DF mode, VSX only. */
3066 if (TARGET_VSX)
3068 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3069 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3070 rs6000_vector_align[V2DFmode] = align64;
3073 /* V4SF mode, either VSX or Altivec. */
3074 if (TARGET_VSX)
3076 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3077 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3078 rs6000_vector_align[V4SFmode] = align32;
3080 else if (TARGET_ALTIVEC)
3082 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3083 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3084 rs6000_vector_align[V4SFmode] = align32;
3087 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3088 and stores. */
3089 if (TARGET_ALTIVEC)
3091 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3092 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3093 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3094 rs6000_vector_align[V4SImode] = align32;
3095 rs6000_vector_align[V8HImode] = align32;
3096 rs6000_vector_align[V16QImode] = align32;
3098 if (TARGET_VSX)
3100 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3101 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3102 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3104 else
3106 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3107 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3108 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3112 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3113 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3114 if (TARGET_VSX)
3116 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3117 rs6000_vector_unit[V2DImode]
3118 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3119 rs6000_vector_align[V2DImode] = align64;
3121 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3122 rs6000_vector_unit[V1TImode]
3123 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3124 rs6000_vector_align[V1TImode] = 128;
3127 /* DFmode, see if we want to use the VSX unit. Memory is handled
3128 differently, so don't set rs6000_vector_mem. */
3129 if (TARGET_VSX)
3131 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3132 rs6000_vector_align[DFmode] = 64;
3135 /* SFmode, see if we want to use the VSX unit. */
3136 if (TARGET_P8_VECTOR)
3138 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3139 rs6000_vector_align[SFmode] = 32;
3142 /* Allow TImode in VSX register and set the VSX memory macros. */
3143 if (TARGET_VSX)
3145 rs6000_vector_mem[TImode] = VECTOR_VSX;
3146 rs6000_vector_align[TImode] = align64;
3149 /* TODO add paired floating point vector support. */
3151 /* Register class constraints for the constraints that depend on compile
3152 switches. When the VSX code was added, different constraints were added
3153 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3154 of the VSX registers are used. The register classes for scalar floating
3155 point types are set based on whether we allow that type into the upper
3156 (Altivec) registers. GCC has register classes to target the Altivec
3157 registers for load/store operations, to select using a VSX memory
3158 operation instead of the traditional floating point operation. The
3159 constraints are:
3161 d - Register class to use with traditional DFmode instructions.
3162 f - Register class to use with traditional SFmode instructions.
3163 v - Altivec register.
3164 wa - Any VSX register.
3165 wc - Reserved to represent individual CR bits (used in LLVM).
3166 wd - Preferred register class for V2DFmode.
3167 wf - Preferred register class for V4SFmode.
3168 wg - Float register for power6x move insns.
3169 wh - FP register for direct move instructions.
3170 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3171 wj - FP or VSX register to hold 64-bit integers for direct moves.
3172 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3173 wl - Float register if we can do 32-bit signed int loads.
3174 wm - VSX register for ISA 2.07 direct move operations.
3175 wn - always NO_REGS.
3176 wr - GPR if 64-bit mode is permitted.
3177 ws - Register class to do ISA 2.06 DF operations.
3178 wt - VSX register for TImode in VSX registers.
3179 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3180 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3181 ww - Register class to do SF conversions in with VSX operations.
3182 wx - Float register if we can do 32-bit int stores.
3183 wy - Register class to do ISA 2.07 SF operations.
3184 wz - Float register if we can do 32-bit unsigned int loads.
3185 wH - Altivec register if SImode is allowed in VSX registers.
3186 wI - VSX register if SImode is allowed in VSX registers.
3187 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3188 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
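/* A hedged sketch of how these constraints are consumed (not a pattern
   copied from this port): a machine description operand such as

     (match_operand:DF 0 "gpc_reg_operand" "=ws")

   only matches when rs6000_constraints[RS6000_CONSTRAINT_ws] is set to a
   real register class below, i.e. when VSX is enabled; a constraint left
   as NO_REGS never matches, disabling that alternative. */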
3190 if (TARGET_HARD_FLOAT)
3191 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3193 if (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
3194 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3196 if (TARGET_VSX)
3198 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3199 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3200 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3201 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3202 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3203 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3204 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3207 /* Add conditional constraints based on various options, to allow us to
3208 collapse multiple insn patterns. */
3209 if (TARGET_ALTIVEC)
3210 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3212 if (TARGET_MFPGPR) /* DFmode */
3213 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3215 if (TARGET_LFIWAX)
3216 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3218 if (TARGET_DIRECT_MOVE)
3220 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3221 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3222 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3223 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3224 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3225 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3228 if (TARGET_POWERPC64)
3230 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3231 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3234 if (TARGET_P8_VECTOR) /* SFmode */
3236 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3237 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3238 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3240 else if (TARGET_VSX)
3241 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3243 if (TARGET_STFIWX)
3244 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3246 if (TARGET_LFIWZX)
3247 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3249 if (TARGET_FLOAT128_TYPE)
3251 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3252 if (FLOAT128_IEEE_P (TFmode))
3253 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3256 if (TARGET_P9_VECTOR)
3258 /* Support for new D-form instructions. */
3259 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3261 /* Support for ISA 3.0 (power9) vectors. */
3262 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3265 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3266 if (TARGET_DIRECT_MOVE_128)
3267 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3269 /* Support small integers in VSX registers. */
3270 if (TARGET_P8_VECTOR)
3272 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3273 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3274 if (TARGET_P9_VECTOR)
3276 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3277 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3281 /* Set up the reload helper and direct move functions. */
3282 if (TARGET_VSX || TARGET_ALTIVEC)
3284 if (TARGET_64BIT)
3286 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3287 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3288 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3289 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3290 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3291 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3292 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3293 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3294 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3295 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3296 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3297 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3298 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3299 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3300 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3301 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3302 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3303 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3304 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3305 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3307 if (FLOAT128_VECTOR_P (KFmode))
3309 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3310 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3313 if (FLOAT128_VECTOR_P (TFmode))
3315 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3316 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3319 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3320 available. */
3321 if (TARGET_NO_SDMODE_STACK)
3323 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3324 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3327 if (TARGET_VSX)
3329 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3330 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3333 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3335 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3336 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3337 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3338 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3339 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3340 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3341 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3342 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3343 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3345 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3346 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3347 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3348 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3349 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3350 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3351 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3352 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3353 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3355 if (FLOAT128_VECTOR_P (KFmode))
3357 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3358 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3361 if (FLOAT128_VECTOR_P (TFmode))
3363 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3364 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3368 else
3370 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3371 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3372 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3373 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3374 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3375 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3376 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3377 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3378 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3379 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3380 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3381 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3382 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3383 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3384 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3385 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3386 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3387 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3388 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3389 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3391 if (FLOAT128_VECTOR_P (KFmode))
3393 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3394 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3397 if (FLOAT128_IEEE_P (TFmode))
3399 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3400 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3403 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3404 available. */
3405 if (TARGET_NO_SDMODE_STACK)
3407 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3408 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3411 if (TARGET_VSX)
3413 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3414 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3417 if (TARGET_DIRECT_MOVE)
3419 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3420 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3421 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3425 reg_addr[DFmode].scalar_in_vmx_p = true;
3426 reg_addr[DImode].scalar_in_vmx_p = true;
3428 if (TARGET_P8_VECTOR)
3430 reg_addr[SFmode].scalar_in_vmx_p = true;
3431 reg_addr[SImode].scalar_in_vmx_p = true;
3433 if (TARGET_P9_VECTOR)
3435 reg_addr[HImode].scalar_in_vmx_p = true;
3436 reg_addr[QImode].scalar_in_vmx_p = true;
3441 /* Set up the fusion operations. */
3442 if (TARGET_P8_FUSION)
3444 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3445 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3446 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3447 if (TARGET_64BIT)
3448 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
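/* Fusion pairs an addis that forms the high part of an address with the
   dependent load, e.g. (illustrative assembly for the medium code model):

     addis r10,r2,sym@toc@ha
     lwz   r10,sym@toc@l(r10)

   which power8 can issue as a single fused operation.  */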
3451 if (TARGET_P9_FUSION)
3453 struct fuse_insns {
3454 enum machine_mode mode; /* mode of the fused type. */
3455 enum machine_mode pmode; /* pointer mode. */
3456 enum rs6000_reload_reg_type rtype; /* register type. */
3457 enum insn_code load; /* load insn. */
3458 enum insn_code store; /* store insn. */
3461 static const struct fuse_insns addis_insns[] = {
3462 { E_SFmode, E_DImode, RELOAD_REG_FPR,
3463 CODE_FOR_fusion_vsx_di_sf_load,
3464 CODE_FOR_fusion_vsx_di_sf_store },
3466 { E_SFmode, E_SImode, RELOAD_REG_FPR,
3467 CODE_FOR_fusion_vsx_si_sf_load,
3468 CODE_FOR_fusion_vsx_si_sf_store },
3470 { E_DFmode, E_DImode, RELOAD_REG_FPR,
3471 CODE_FOR_fusion_vsx_di_df_load,
3472 CODE_FOR_fusion_vsx_di_df_store },
3474 { E_DFmode, E_SImode, RELOAD_REG_FPR,
3475 CODE_FOR_fusion_vsx_si_df_load,
3476 CODE_FOR_fusion_vsx_si_df_store },
3478 { E_DImode, E_DImode, RELOAD_REG_FPR,
3479 CODE_FOR_fusion_vsx_di_di_load,
3480 CODE_FOR_fusion_vsx_di_di_store },
3482 { E_DImode, E_SImode, RELOAD_REG_FPR,
3483 CODE_FOR_fusion_vsx_si_di_load,
3484 CODE_FOR_fusion_vsx_si_di_store },
3486 { E_QImode, E_DImode, RELOAD_REG_GPR,
3487 CODE_FOR_fusion_gpr_di_qi_load,
3488 CODE_FOR_fusion_gpr_di_qi_store },
3490 { E_QImode, E_SImode, RELOAD_REG_GPR,
3491 CODE_FOR_fusion_gpr_si_qi_load,
3492 CODE_FOR_fusion_gpr_si_qi_store },
3494 { E_HImode, E_DImode, RELOAD_REG_GPR,
3495 CODE_FOR_fusion_gpr_di_hi_load,
3496 CODE_FOR_fusion_gpr_di_hi_store },
3498 { E_HImode, E_SImode, RELOAD_REG_GPR,
3499 CODE_FOR_fusion_gpr_si_hi_load,
3500 CODE_FOR_fusion_gpr_si_hi_store },
3502 { E_SImode, E_DImode, RELOAD_REG_GPR,
3503 CODE_FOR_fusion_gpr_di_si_load,
3504 CODE_FOR_fusion_gpr_di_si_store },
3506 { E_SImode, E_SImode, RELOAD_REG_GPR,
3507 CODE_FOR_fusion_gpr_si_si_load,
3508 CODE_FOR_fusion_gpr_si_si_store },
3510 { E_SFmode, E_DImode, RELOAD_REG_GPR,
3511 CODE_FOR_fusion_gpr_di_sf_load,
3512 CODE_FOR_fusion_gpr_di_sf_store },
3514 { E_SFmode, E_SImode, RELOAD_REG_GPR,
3515 CODE_FOR_fusion_gpr_si_sf_load,
3516 CODE_FOR_fusion_gpr_si_sf_store },
3518 { E_DImode, E_DImode, RELOAD_REG_GPR,
3519 CODE_FOR_fusion_gpr_di_di_load,
3520 CODE_FOR_fusion_gpr_di_di_store },
3522 { E_DFmode, E_DImode, RELOAD_REG_GPR,
3523 CODE_FOR_fusion_gpr_di_df_load,
3524 CODE_FOR_fusion_gpr_di_df_store },
3527 machine_mode cur_pmode = Pmode;
3528 size_t i;
3530 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3532 machine_mode xmode = addis_insns[i].mode;
3533 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3535 if (addis_insns[i].pmode != cur_pmode)
3536 continue;
3538 if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
3539 continue;
3541 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3542 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3544 if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
3546 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3547 = addis_insns[i].load;
3548 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3549 = addis_insns[i].store;
3554 /* Note which types we support for fusing a TOC setup plus a memory insn. We
3555 only do fused TOCs for medium/large code models. */
3556 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3557 && (TARGET_CMODEL != CMODEL_SMALL))
3559 reg_addr[QImode].fused_toc = true;
3560 reg_addr[HImode].fused_toc = true;
3561 reg_addr[SImode].fused_toc = true;
3562 reg_addr[DImode].fused_toc = true;
3563 if (TARGET_HARD_FLOAT)
3565 if (TARGET_SINGLE_FLOAT)
3566 reg_addr[SFmode].fused_toc = true;
3567 if (TARGET_DOUBLE_FLOAT)
3568 reg_addr[DFmode].fused_toc = true;
3572 /* Precalculate HARD_REGNO_NREGS. */
3573 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3574 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3575 rs6000_hard_regno_nregs[m][r]
3576 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3578 /* Precalculate HARD_REGNO_MODE_OK. */
3579 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3580 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3581 if (rs6000_hard_regno_mode_ok (r, (machine_mode)m))
3582 rs6000_hard_regno_mode_ok_p[m][r] = true;
3584 /* Precalculate CLASS_MAX_NREGS sizes. */
3585 for (c = 0; c < LIM_REG_CLASSES; ++c)
3587 int reg_size;
3589 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3590 reg_size = UNITS_PER_VSX_WORD;
3592 else if (c == ALTIVEC_REGS)
3593 reg_size = UNITS_PER_ALTIVEC_WORD;
3595 else if (c == FLOAT_REGS)
3596 reg_size = UNITS_PER_FP_WORD;
3598 else
3599 reg_size = UNITS_PER_WORD;
3601 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3603 machine_mode m2 = (machine_mode)m;
3604 int reg_size2 = reg_size;
3606 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3607 in VSX. */
3608 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3609 reg_size2 = UNITS_PER_FP_WORD;
3611 rs6000_class_max_nregs[m][c]
3612 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
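/* Worked example (illustrative): with this rounding-up division, a 16-byte
   TFmode value in FLOAT_REGS (8-byte registers) needs
   (16 + 8 - 1) / 8 = 2 registers, while a 4-byte SFmode value needs
   (4 + 8 - 1) / 8 = 1.  */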
3616 /* Calculate which modes to automatically generate code to use the
3617 reciprocal divide and square root instructions. In the future, possibly
3618 automatically generate the instructions even if the user did not specify
3619 -mrecip. The older machines' double-precision reciprocal sqrt estimate is
3620 not accurate enough. */
3621 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3622 if (TARGET_FRES)
3623 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3624 if (TARGET_FRE)
3625 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3626 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3627 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3628 if (VECTOR_UNIT_VSX_P (V2DFmode))
3629 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3631 if (TARGET_FRSQRTES)
3632 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3633 if (TARGET_FRSQRTE)
3634 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3635 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3636 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3637 if (VECTOR_UNIT_VSX_P (V2DFmode))
3638 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3640 if (rs6000_recip_control)
3642 if (!flag_finite_math_only)
3643 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math",
3644 "-ffast-math");
3645 if (flag_trapping_math)
3646 warning (0, "%qs requires %qs or %qs", "-mrecip",
3647 "-fno-trapping-math", "-ffast-math");
3648 if (!flag_reciprocal_math)
3649 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3650 "-ffast-math");
3651 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3653 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3654 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3655 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3657 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3658 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3659 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3661 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3662 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3663 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3665 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3666 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3667 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3669 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3670 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3671 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3673 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3674 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3675 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3677 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3678 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3679 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3681 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3682 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3683 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
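/* Illustrative example: compiling with -mrecip -ffast-math on a cpu that
   sets TARGET_FRE and TARGET_FRSQRTE leaves both the HAVE and AUTO bits set
   for DFmode here, so a division x/y can be expanded as x * fre(y) refined
   by Newton-Raphson steps instead of a full fdiv.  */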
3687 /* Update the addr mask bits in reg_addr to help secondary reload and the
3688 legitimate-address support figure out the appropriate addressing to
3689 use. */
3690 rs6000_setup_reg_addr_masks ();
3692 if (global_init_p || TARGET_DEBUG_TARGET)
3694 if (TARGET_DEBUG_REG)
3695 rs6000_debug_reg_global ();
3697 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3698 fprintf (stderr,
3699 "SImode variable mult cost = %d\n"
3700 "SImode constant mult cost = %d\n"
3701 "SImode short constant mult cost = %d\n"
3702 "DImode multipliciation cost = %d\n"
3703 "SImode division cost = %d\n"
3704 "DImode division cost = %d\n"
3705 "Simple fp operation cost = %d\n"
3706 "DFmode multiplication cost = %d\n"
3707 "SFmode division cost = %d\n"
3708 "DFmode division cost = %d\n"
3709 "cache line size = %d\n"
3710 "l1 cache size = %d\n"
3711 "l2 cache size = %d\n"
3712 "simultaneous prefetches = %d\n"
3713 "\n",
3714 rs6000_cost->mulsi,
3715 rs6000_cost->mulsi_const,
3716 rs6000_cost->mulsi_const9,
3717 rs6000_cost->muldi,
3718 rs6000_cost->divsi,
3719 rs6000_cost->divdi,
3720 rs6000_cost->fp,
3721 rs6000_cost->dmul,
3722 rs6000_cost->sdiv,
3723 rs6000_cost->ddiv,
3724 rs6000_cost->cache_line_size,
3725 rs6000_cost->l1_cache_size,
3726 rs6000_cost->l2_cache_size,
3727 rs6000_cost->simultaneous_prefetches);
3731 #if TARGET_MACHO
3732 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3734 static void
3735 darwin_rs6000_override_options (void)
3737 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3738 off. */
3739 rs6000_altivec_abi = 1;
3740 TARGET_ALTIVEC_VRSAVE = 1;
3741 rs6000_current_abi = ABI_DARWIN;
3743 if (DEFAULT_ABI == ABI_DARWIN
3744 && TARGET_64BIT)
3745 darwin_one_byte_bool = 1;
3747 if (TARGET_64BIT && ! TARGET_POWERPC64)
3749 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3750 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3752 if (flag_mkernel)
3754 rs6000_default_long_calls = 1;
3755 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3758 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3759 Altivec. */
3760 if (!flag_mkernel && !flag_apple_kext
3761 && TARGET_64BIT
3762 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3763 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3765 /* Unless the user (not the configurer) has explicitly overridden
3766 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3767 G4 unless targeting the kernel. */
3768 if (!flag_mkernel
3769 && !flag_apple_kext
3770 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3771 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3772 && ! global_options_set.x_rs6000_cpu_index)
3774 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3777 #endif
3779 /* If not otherwise specified by a target, make 'long double' equivalent to
3780 'double'. */
3782 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3783 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3784 #endif
3786 /* Return the builtin mask of the various options that could affect which
3787 builtins are available. In the past we used target_flags, but we've run out
3788 of bits, and some options like PAIRED are no longer in target_flags. */
3790 HOST_WIDE_INT
3791 rs6000_builtin_mask_calculate (void)
3793 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3794 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3795 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3796 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3797 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3798 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3799 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3800 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3801 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3802 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3803 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3804 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3805 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3806 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3807 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3808 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3809 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3810 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3811 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3812 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3813 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0));
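/* Rough sketch of how this mask is consumed when built-ins are registered
   or expanded (illustrative, not the exact code; bif_mask is a hypothetical
   name for the per-builtin requirement mask):

     HOST_WIDE_INT bif_mask = rs6000_builtin_info[code].mask;
     if ((bif_mask & rs6000_builtin_mask) != bif_mask)
       ... reject or skip the built-in ...  */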
3816 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3817 to clobber the XER[CA] bit because clobbering that bit without telling
3818 the compiler worked just fine with versions of GCC before GCC 5, and
3819 breaking a lot of older code in ways that are hard to track down is
3820 not such a great idea. */
3822 static rtx_insn *
3823 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3824 vec<const char *> &/*constraints*/,
3825 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3827 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3828 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3829 return NULL;
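/* Illustrative example of the older code this protects (x and y are
   hypothetical variables): an asm such as

     asm ("addic %0,%1,-1\n\taddze %0,%0" : "=r" (x) : "r" (y));

   modifies XER[CA] without declaring it; the clobber added above keeps such
   pre-GCC 5 style asm statements working.  */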
3832 /* Override command line options.
3834 Combine build-specific configuration information with options
3835 specified on the command line to set various state variables which
3836 influence code generation, optimization, and expansion of built-in
3837 functions. Assure that command-line configuration preferences are
3838 compatible with each other and with the build configuration; issue
3839 warnings while adjusting configuration or error messages while
3840 rejecting configuration.
3842 Upon entry to this function:
3844 This function is called once at the beginning of
3845 compilation, and then again at the start and end of compiling
3846 each section of code that has a different configuration, as
3847 indicated, for example, by adding the
3849 __attribute__((__target__("cpu=power9")))
3851 qualifier to a function definition or, for example, by bracketing
3852 code between
3854 #pragma GCC target("altivec")
3858 #pragma GCC reset_options
3860 directives. Parameter global_init_p is true for the initial
3861 invocation, which initializes global variables, and false for all
3862 subsequent invocations.
3865 Various global state information is assumed to be valid. This
3866 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3867 default CPU specified at build configure time, TARGET_DEFAULT,
3868 representing the default set of option flags for the default
3869 target, and global_options_set.x_rs6000_isa_flags, representing
3870 which options were requested on the command line.
3872 Upon return from this function:
3874 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3875 was set by name on the command line. Additionally, if certain
3876 attributes are automatically enabled or disabled by this function
3877 in order to assure compatibility between options and
3878 configuration, the flags associated with those attributes are
3879 also set. By setting these "explicit bits", we avoid the risk
3880 that other code might accidentally overwrite these particular
3881 attributes with "default values".
3883 The various bits of rs6000_isa_flags are set to indicate the
3884 target options that have been selected for the most current
3885 compilation efforts. This has the effect of also turning on the
3886 associated TARGET_XXX values since these are macros which are
3887 generally defined to test the corresponding bit of the
3888 rs6000_isa_flags variable.
3890 The variable rs6000_builtin_mask is set to represent the target
3891 options for the most current compilation efforts, consistent with
3892 the current contents of rs6000_isa_flags. This variable controls
3893 expansion of built-in functions.
3895 Various other global variables and fields of global structures
3896 (over 50 in all) are initialized to reflect the desired options
3897 for the most current compilation efforts. */
3899 static bool
3900 rs6000_option_override_internal (bool global_init_p)
3902 bool ret = true;
3903 bool have_cpu = false;
3905 /* The default cpu requested at configure time, if any. */
3906 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3908 HOST_WIDE_INT set_masks;
3909 HOST_WIDE_INT ignore_masks;
3910 int cpu_index;
3911 int tune_index;
3912 struct cl_target_option *main_target_opt
3913 = ((global_init_p || target_option_default_node == NULL)
3914 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3916 /* Print defaults. */
3917 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3918 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3920 /* Remember the explicit arguments. */
3921 if (global_init_p)
3922 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3924 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3925 library functions, so warn about it. The flag may be useful for
3926 performance studies from time to time though, so don't disable it
3927 entirely. */
3928 if (global_options_set.x_rs6000_alignment_flags
3929 && rs6000_alignment_flags == MASK_ALIGN_POWER
3930 && DEFAULT_ABI == ABI_DARWIN
3931 && TARGET_64BIT)
3932 warning (0, "%qs is not supported for 64-bit Darwin;"
3933 " it is incompatible with the installed C and C++ libraries",
3934 "-malign-power");
3936 /* Numerous experiments show that IRA-based loop pressure
3937 calculation works better for RTL loop invariant motion on targets
3938 with enough (>= 32) registers. It is an expensive optimization,
3939 so it is enabled only for peak performance. */
3940 if (optimize >= 3 && global_init_p
3941 && !global_options_set.x_flag_ira_loop_pressure)
3942 flag_ira_loop_pressure = 1;
3944 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3945 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3946 options were already specified. */
3947 if (flag_sanitize & SANITIZE_USER_ADDRESS
3948 && !global_options_set.x_flag_asynchronous_unwind_tables)
3949 flag_asynchronous_unwind_tables = 1;
3951 /* Set the pointer size. */
3952 if (TARGET_64BIT)
3954 rs6000_pmode = DImode;
3955 rs6000_pointer_size = 64;
3957 else
3959 rs6000_pmode = SImode;
3960 rs6000_pointer_size = 32;
3963 /* Some OSs don't support saving the high part of 64-bit registers on context
3964 switch. Other OSs don't support saving Altivec registers. On those OSs,
3965 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3966 if the user wants either, the user must explicitly specify them and we
3967 won't interfere with the user's specification. */
3969 set_masks = POWERPC_MASKS;
3970 #ifdef OS_MISSING_POWERPC64
3971 if (OS_MISSING_POWERPC64)
3972 set_masks &= ~OPTION_MASK_POWERPC64;
3973 #endif
3974 #ifdef OS_MISSING_ALTIVEC
3975 if (OS_MISSING_ALTIVEC)
3976 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3977 | OTHER_VSX_VECTOR_MASKS);
3978 #endif
3980 /* Don't let the processor default override flags given explicitly. */
3981 set_masks &= ~rs6000_isa_flags_explicit;
3983 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3984 the cpu in a target attribute or pragma, but did not specify a tuning
3985 option, use the cpu for the tuning option rather than the option specified
3986 with -mtune on the command line. Process a '--with-cpu' configuration
3987 request as an implicit -mcpu. */
3988 if (rs6000_cpu_index >= 0)
3990 cpu_index = rs6000_cpu_index;
3991 have_cpu = true;
3993 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3995 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
3996 have_cpu = true;
3998 else if (implicit_cpu)
4000 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
4001 have_cpu = true;
4003 else
4005 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4006 const char *default_cpu = ((!TARGET_POWERPC64)
4007 ? "powerpc"
4008 : ((BYTES_BIG_ENDIAN)
4009 ? "powerpc64"
4010 : "powerpc64le"));
4012 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4013 have_cpu = false;
4016 gcc_assert (cpu_index >= 0);
4018 if (have_cpu)
4020 #ifndef HAVE_AS_POWER9
4021 if (processor_target_table[rs6000_cpu_index].processor
4022 == PROCESSOR_POWER9)
4024 have_cpu = false;
4025 warning (0, "will not generate power9 instructions because "
4026 "assembler lacks power9 support");
4028 #endif
4029 #ifndef HAVE_AS_POWER8
4030 if (processor_target_table[rs6000_cpu_index].processor
4031 == PROCESSOR_POWER8)
4033 have_cpu = false;
4034 warning (0, "will not generate power8 instructions because "
4035 "assembler lacks power8 support");
4037 #endif
4038 #ifndef HAVE_AS_POPCNTD
4039 if (processor_target_table[rs6000_cpu_index].processor
4040 == PROCESSOR_POWER7)
4042 have_cpu = false;
4043 warning (0, "will not generate power7 instructions because "
4044 "assembler lacks power7 support");
4046 #endif
4047 #ifndef HAVE_AS_DFP
4048 if (processor_target_table[rs6000_cpu_index].processor
4049 == PROCESSOR_POWER6)
4051 have_cpu = false;
4052 warning (0, "will not generate power6 instructions because "
4053 "assembler lacks power6 support");
4055 #endif
4056 #ifndef HAVE_AS_POPCNTB
4057 if (processor_target_table[rs6000_cpu_index].processor
4058 == PROCESSOR_POWER5)
4060 have_cpu = false;
4061 warning (0, "will not generate power5 instructions because "
4062 "assembler lacks power5 support");
4064 #endif
4066 if (!have_cpu)
4068 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4069 const char *default_cpu = (!TARGET_POWERPC64
4070 ? "powerpc"
4071 : (BYTES_BIG_ENDIAN
4072 ? "powerpc64"
4073 : "powerpc64le"));
4075 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4079 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
4080 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4081 with those from the cpu, except for options that were explicitly set. If
4082 we don't have a cpu, do not override the target bits set in
4083 TARGET_DEFAULT. */
4084 if (have_cpu)
4086 rs6000_isa_flags &= ~set_masks;
4087 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4088 & set_masks);
4090 else
4092 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4093 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4094 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
4095 to using rs6000_isa_flags, we need to do the initialization here.
4097 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4098 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4099 HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
4100 : processor_target_table[cpu_index].target_enable);
4101 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
4104 if (rs6000_tune_index >= 0)
4105 tune_index = rs6000_tune_index;
4106 else if (have_cpu)
4107 rs6000_tune_index = tune_index = cpu_index;
4108 else
4110 size_t i;
4111 enum processor_type tune_proc
4112 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4114 tune_index = -1;
4115 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4116 if (processor_target_table[i].processor == tune_proc)
4118 rs6000_tune_index = tune_index = i;
4119 break;
4123 gcc_assert (tune_index >= 0);
4124 rs6000_cpu = processor_target_table[tune_index].processor;
4126 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4127 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4128 || rs6000_cpu == PROCESSOR_PPCE5500)
4130 if (TARGET_ALTIVEC)
4131 error ("AltiVec not supported in this target");
4134 /* If we are optimizing big endian systems for space, use the load/store
4135 multiple and string instructions. */
4136 if (BYTES_BIG_ENDIAN && optimize_size)
4137 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
4138 | OPTION_MASK_STRING);
4140 /* Don't allow -mmultiple or -mstring on little endian systems
4141 unless the cpu is a 750, because the hardware doesn't support the
4142 instructions in little endian mode, and they cause an alignment
4143 trap. The 750 does not cause an alignment trap (except when the
4144 target is unaligned). */
4146 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
4148 if (TARGET_MULTIPLE)
4150 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4151 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4152 warning (0, "%qs is not supported on little endian systems",
4153 "-mmultiple");
4156 if (TARGET_STRING)
4158 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4159 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
4160 warning (0, "%qs is not supported on little endian systems",
4161 "-mstring");
4165 /* If little-endian, default to -mstrict-align on older processors.
4166 Testing for htm matches power8 and later. */
4167 if (!BYTES_BIG_ENDIAN
4168 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4169 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4171 /* -maltivec={le,be} implies -maltivec. */
4172 if (rs6000_altivec_element_order != 0)
4173 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
4175 /* Disallow -maltivec=le in big endian mode for now. This is not
4176 known to be useful for anyone. */
4177 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
4179 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
4180 rs6000_altivec_element_order = 0;
4183 if (!rs6000_fold_gimple)
4184 fprintf (stderr,
4185 "gimple folding of rs6000 builtins has been disabled.\n");
4187 /* Add some warnings for VSX. */
4188 if (TARGET_VSX)
4190 const char *msg = NULL;
4191 if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
4193 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4194 msg = N_("-mvsx requires hardware floating point");
4195 else
4197 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4198 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4201 else if (TARGET_PAIRED_FLOAT)
4202 msg = N_("-mvsx and -mpaired are incompatible");
4203 else if (TARGET_AVOID_XFORM > 0)
4204 msg = N_("-mvsx needs indexed addressing");
4205 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4206 & OPTION_MASK_ALTIVEC))
4208 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4209 msg = N_("-mvsx and -mno-altivec are incompatible");
4210 else
4211 msg = N_("-mno-altivec disables vsx");
4214 if (msg)
4216 warning (0, msg);
4217 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4218 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4222 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4223 the -mcpu setting to enable options that conflict. */
4224 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4225 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4226 | OPTION_MASK_ALTIVEC
4227 | OPTION_MASK_VSX)) != 0)
4228 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4229 | OPTION_MASK_DIRECT_MOVE)
4230 & ~rs6000_isa_flags_explicit);
4232 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4233 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4235 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4236 off all of the options that depend on those flags. */
4237 ignore_masks = rs6000_disable_incompatible_switches ();
4239 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4240 unless the user explicitly used the -mno-<option> to disable the code. */
4241 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4242 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4243 else if (TARGET_P9_MINMAX)
4245 if (have_cpu)
4247 if (cpu_index == PROCESSOR_POWER9)
4249 /* legacy behavior: allow -mcpu=power9 with certain
4250 capabilities explicitly disabled. */
4251 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4253 else
4254 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4255 "for <xxx> less than power9", "-mcpu");
4257 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4258 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4259 & rs6000_isa_flags_explicit))
4260 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4261 were explicitly cleared. */
4262 error ("%qs incompatible with explicitly disabled options",
4263 "-mpower9-minmax");
4264 else
4265 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4267 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4268 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4269 else if (TARGET_VSX)
4270 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4271 else if (TARGET_POPCNTD)
4272 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4273 else if (TARGET_DFP)
4274 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4275 else if (TARGET_CMPB)
4276 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4277 else if (TARGET_FPRND)
4278 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4279 else if (TARGET_POPCNTB)
4280 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4281 else if (TARGET_ALTIVEC)
4282 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4284 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4286 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4287 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4288 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4291 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4293 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4294 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4295 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4298 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4300 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4301 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4302 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4305 if (TARGET_P8_VECTOR && !TARGET_VSX)
4307 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4308 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4309 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4310 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4312 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4313 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4314 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4316 else
4318 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4319 not explicit. */
4320 rs6000_isa_flags |= OPTION_MASK_VSX;
4321 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4325 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4327 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4328 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4329 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4332 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4333 silently turn off quad memory mode. */
4334 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4336 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4337 warning (0, N_("-mquad-memory requires 64-bit mode"));
4339 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4340 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4342 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4343 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4346 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4347 the words are reversed, but atomic operations can still be done by
4348 swapping the words. */
4349 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4351 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4352 warning (0, N_("-mquad-memory is not available in little endian "
4353 "mode"));
4355 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4358 /* Assume that if the user asked for normal quad memory instructions, they
4359 want the atomic versions as well, unless they explicitly told us not to
4360 use quad word atomic instructions. */
4361 if (TARGET_QUAD_MEMORY
4362 && !TARGET_QUAD_MEMORY_ATOMIC
4363 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4364 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4366 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4367 generating power8 instructions. */
4368 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4369 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4370 & OPTION_MASK_P8_FUSION);
4372 /* Setting additional fusion flags turns on base fusion. */
4373 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4375 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4377 if (TARGET_P8_FUSION_SIGN)
4378 error ("%qs requires %qs", "-mpower8-fusion-sign",
4379 "-mpower8-fusion");
4381 if (TARGET_TOC_FUSION)
4382 error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");
4384 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4386 else
4387 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4390 /* Power9 fusion is a superset of power8 fusion. */
4391 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4393 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4395 /* We prefer not to mention undocumented options in
4396 error messages. However, if users have managed to select
4397 power9-fusion without selecting power8-fusion, they
4398 already know about undocumented flags. */
4399 error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
4400 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4402 else
4403 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4406 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4407 generating power9 instructions. */
4408 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4409 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4410 & OPTION_MASK_P9_FUSION);
4412 /* Power8 does not fuse sign-extended loads with the addis. If we are
4413 optimizing at high levels for speed, convert a sign-extending load into a
4414 zero-extending load plus an explicit sign extension. */
4415 if (TARGET_P8_FUSION
4416 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4417 && optimize_function_for_speed_p (cfun)
4418 && optimize >= 3)
4419 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
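/* Illustrative effect (assumed mnemonics): rather than an addis followed by
   a sign-extending lwa, which power8 cannot fuse, emit the fusible
   addis + lwz pair and sign-extend the result afterwards with extsw.  */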
4421 /* TOC fusion requires 64-bit and medium/large code model. */
4422 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4424 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4425 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4426 warning (0, N_("-mtoc-fusion requires 64-bit"));
4429 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4431 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4432 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4433 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4436 /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
4437 model. */
4438 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4439 && (TARGET_CMODEL != CMODEL_SMALL)
4440 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4441 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4443 /* ISA 3.0 vector instructions include ISA 2.07. */
4444 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4446 /* We prefer not to mention undocumented options in
4447 error messages. However, if users have managed to select
4448 power9-vector without selecting power8-vector, they
4449 already know about undocumented flags. */
4450 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4451 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4452 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4453 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4455 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4456 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4457 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4459 else
4461 /* OPTION_MASK_P9_VECTOR is explicit and
4462 OPTION_MASK_P8_VECTOR is not explicit. */
4463 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4464 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4468 /* Set -mallow-movmisalign explicitly to on if we have full ISA 2.07
4469 support. If we only have ISA 2.06 support, and the user did not specify
4470 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4471 but we don't enable the full vectorization support. */
4472 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4473 TARGET_ALLOW_MOVMISALIGN = 1;
4475 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4477 if (TARGET_ALLOW_MOVMISALIGN > 0
4478 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4479 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4481 TARGET_ALLOW_MOVMISALIGN = 0;
4484 /* Determine when unaligned vector accesses are permitted, and when
4485 they are preferred over masked Altivec loads. Note that if
4486 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4487 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4488 not true. */
4489 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4491 if (!TARGET_VSX)
4493 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4494 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4496 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4499 else if (!TARGET_ALLOW_MOVMISALIGN)
4501 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4502 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4503 "-mallow-movmisalign");
4505 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4509 /* Set long double size before the IEEE 128-bit tests. */
4510 if (!global_options_set.x_rs6000_long_double_type_size)
4512 if (main_target_opt != NULL
4513 && (main_target_opt->x_rs6000_long_double_type_size
4514 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4515 error ("target attribute or pragma changes long double size");
4516 else
4517 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4520 /* Set -mabi=ieeelongdouble on some old targets. Note, AIX and Darwin
4521 explicitly redefine TARGET_IEEEQUAD to 0, so those systems will not
4522 pick up this default. */
4523 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
4524 if (!global_options_set.x_rs6000_ieeequad)
4525 rs6000_ieeequad = 1;
4526 #endif
4528 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4529 systems, but don't enable the __float128 keyword. */
4530 if (TARGET_VSX && TARGET_LONG_DOUBLE_128
4531 && (TARGET_FLOAT128_ENABLE_TYPE || TARGET_IEEEQUAD)
4532 && ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) == 0))
4533 rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
4535 /* IEEE 128-bit floating point requires VSX support. */
4536 if (!TARGET_VSX)
4538 if (TARGET_FLOAT128_KEYWORD)
4540 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4541 error ("%qs requires VSX support", "-mfloat128");
4543 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4544 | OPTION_MASK_FLOAT128_KEYWORD
4545 | OPTION_MASK_FLOAT128_HW);
4548 else if (TARGET_FLOAT128_TYPE)
4550 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) != 0)
4551 error ("%qs requires VSX support", "-mfloat128-type");
4553 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4554 | OPTION_MASK_FLOAT128_KEYWORD
4555 | OPTION_MASK_FLOAT128_HW);
4559 /* -mfloat128 and -mfloat128-hardware internally require the underlying IEEE
4560 128-bit floating point support to be enabled. */
4561 if (!TARGET_FLOAT128_TYPE)
4563 if (TARGET_FLOAT128_KEYWORD)
4565 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4567 error ("%qs requires %qs", "-mfloat128", "-mfloat128-type");
4568 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4569 | OPTION_MASK_FLOAT128_KEYWORD
4570 | OPTION_MASK_FLOAT128_HW);
4572 else
4573 rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
4576 if (TARGET_FLOAT128_HW)
4578 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4580 error ("%qs requires %qs", "-mfloat128-hardware",
4581 "-mfloat128-type");
4582 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4584 else
4585 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4586 | OPTION_MASK_FLOAT128_KEYWORD
4587 | OPTION_MASK_FLOAT128_HW);
4591 /* If we have -mfloat128-type and full ISA 3.0 support, enable
4592 -mfloat128-hardware by default. However, don't enable the __float128
4593 keyword. If the user explicitly turned on -mfloat128-hardware, enable the
4594 -mfloat128 option as well if it was not already set. */
4595 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW
4596 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4597 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4598 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4600 if (TARGET_FLOAT128_HW
4601 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4603 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4604 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4606 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4609 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4611 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4612 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4614 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4617 if (TARGET_FLOAT128_HW && !TARGET_FLOAT128_KEYWORD
4618 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0
4619 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4620 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4622 /* Print the options after updating the defaults. */
4623 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4624 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4626 /* E500mc does "better" if we inline more aggressively. Respect the
4627 user's opinion, though. */
4628 if (rs6000_block_move_inline_limit == 0
4629 && (rs6000_cpu == PROCESSOR_PPCE500MC
4630 || rs6000_cpu == PROCESSOR_PPCE500MC64
4631 || rs6000_cpu == PROCESSOR_PPCE5500
4632 || rs6000_cpu == PROCESSOR_PPCE6500))
4633 rs6000_block_move_inline_limit = 128;
4635 /* store_one_arg depends on expand_block_move to handle at least the
4636 size of reg_parm_stack_space. */
4637 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4638 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4640 if (global_init_p)
4642 /* If the appropriate debug option is enabled, replace the target hooks
4643 with debug versions that call the real version and then print
4644 debugging information. */
4645 if (TARGET_DEBUG_COST)
4647 targetm.rtx_costs = rs6000_debug_rtx_costs;
4648 targetm.address_cost = rs6000_debug_address_cost;
4649 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4652 if (TARGET_DEBUG_ADDR)
4654 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4655 targetm.legitimize_address = rs6000_debug_legitimize_address;
4656 rs6000_secondary_reload_class_ptr
4657 = rs6000_debug_secondary_reload_class;
4658 rs6000_secondary_memory_needed_ptr
4659 = rs6000_debug_secondary_memory_needed;
4660 rs6000_cannot_change_mode_class_ptr
4661 = rs6000_debug_cannot_change_mode_class;
4662 rs6000_preferred_reload_class_ptr
4663 = rs6000_debug_preferred_reload_class;
4664 rs6000_legitimize_reload_address_ptr
4665 = rs6000_debug_legitimize_reload_address;
4666 rs6000_mode_dependent_address_ptr
4667 = rs6000_debug_mode_dependent_address;
4670 if (rs6000_veclibabi_name)
4672 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4673 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4674 else
4676 error ("unknown vectorization library ABI type (%qs) for "
4677 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4678 ret = false;
4683 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4684 target attribute or pragma which automatically enables both options,
4685 unless the altivec ABI was set. This is set by default for 64-bit, but
4686 not for 32-bit. */
4687 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4688 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4689 | OPTION_MASK_FLOAT128_TYPE
4690 | OPTION_MASK_FLOAT128_KEYWORD)
4691 & ~rs6000_isa_flags_explicit);
4693 /* Enable Altivec ABI for AIX -maltivec. */
4694 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4696 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4697 error ("target attribute or pragma changes AltiVec ABI");
4698 else
4699 rs6000_altivec_abi = 1;
4702 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4703 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4704 be explicitly overridden in either case. */
4705 if (TARGET_ELF)
4707 if (!global_options_set.x_rs6000_altivec_abi
4708 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4710 if (main_target_opt != NULL
4711 && !main_target_opt->x_rs6000_altivec_abi)
4712 error ("target attribute or pragma changes AltiVec ABI");
4713 else
4714 rs6000_altivec_abi = 1;
4718 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4719 So far, the only darwin64 targets are also MACH-O. */
4720 if (TARGET_MACHO
4721 && DEFAULT_ABI == ABI_DARWIN
4722 && TARGET_64BIT)
4724 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4725 error ("target attribute or pragma changes darwin64 ABI");
4726 else
4728 rs6000_darwin64_abi = 1;
4729 /* Default to natural alignment, for better performance. */
4730 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4734 /* Place FP constants in the constant pool instead of the TOC
4735 if section anchors are enabled. */
4736 if (flag_section_anchors
4737 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4738 TARGET_NO_FP_IN_TOC = 1;
4740 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4741 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4743 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4744 SUBTARGET_OVERRIDE_OPTIONS;
4745 #endif
4746 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4747 SUBSUBTARGET_OVERRIDE_OPTIONS;
4748 #endif
4749 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4750 SUB3TARGET_OVERRIDE_OPTIONS;
4751 #endif
4753 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4754 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4756 /* For the E500 family of cores, reset the single/double FP flags to let us
4757 check that they remain constant across attributes or pragmas. Also,
4758 clear a possible request for string instructions, which are not supported
4759 and which we might have silently enabled above for -Os.
4761 For other families, clear ISEL in case it was set implicitly.
4764 switch (rs6000_cpu)
4766 case PROCESSOR_PPC8540:
4767 case PROCESSOR_PPC8548:
4768 case PROCESSOR_PPCE500MC:
4769 case PROCESSOR_PPCE500MC64:
4770 case PROCESSOR_PPCE5500:
4771 case PROCESSOR_PPCE6500:
4773 rs6000_single_float = 0;
4774 rs6000_double_float = 0;
4776 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4778 break;
4780 default:
4782 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
4783 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
4785 break;
4788 if (main_target_opt)
4790 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4791 error ("target attribute or pragma changes single precision floating "
4792 "point");
4793 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4794 error ("target attribute or pragma changes double precision floating "
4795 "point");
4798 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
4799 && rs6000_cpu != PROCESSOR_POWER5
4800 && rs6000_cpu != PROCESSOR_POWER6
4801 && rs6000_cpu != PROCESSOR_POWER7
4802 && rs6000_cpu != PROCESSOR_POWER8
4803 && rs6000_cpu != PROCESSOR_POWER9
4804 && rs6000_cpu != PROCESSOR_PPCA2
4805 && rs6000_cpu != PROCESSOR_CELL
4806 && rs6000_cpu != PROCESSOR_PPC476);
4807 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
4808 || rs6000_cpu == PROCESSOR_POWER5
4809 || rs6000_cpu == PROCESSOR_POWER7
4810 || rs6000_cpu == PROCESSOR_POWER8);
4811 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
4812 || rs6000_cpu == PROCESSOR_POWER5
4813 || rs6000_cpu == PROCESSOR_POWER6
4814 || rs6000_cpu == PROCESSOR_POWER7
4815 || rs6000_cpu == PROCESSOR_POWER8
4816 || rs6000_cpu == PROCESSOR_POWER9
4817 || rs6000_cpu == PROCESSOR_PPCE500MC
4818 || rs6000_cpu == PROCESSOR_PPCE500MC64
4819 || rs6000_cpu == PROCESSOR_PPCE5500
4820 || rs6000_cpu == PROCESSOR_PPCE6500);
4822 /* Allow debug switches to override the above settings. These are set to -1
4823 in rs6000.opt to indicate the user hasn't directly set the switch. */
4824 if (TARGET_ALWAYS_HINT >= 0)
4825 rs6000_always_hint = TARGET_ALWAYS_HINT;
4827 if (TARGET_SCHED_GROUPS >= 0)
4828 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4830 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4831 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4833 rs6000_sched_restricted_insns_priority
4834 = (rs6000_sched_groups ? 1 : 0);
4836 /* Handle -msched-costly-dep option. */
4837 rs6000_sched_costly_dep
4838 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4840 if (rs6000_sched_costly_dep_str)
4842 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4843 rs6000_sched_costly_dep = no_dep_costly;
4844 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4845 rs6000_sched_costly_dep = all_deps_costly;
4846 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4847 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4848 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4849 rs6000_sched_costly_dep = store_to_load_dep_costly;
4850 else
4851 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4852 atoi (rs6000_sched_costly_dep_str));
4855 /* Handle -minsert-sched-nops option. */
4856 rs6000_sched_insert_nops
4857 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4859 if (rs6000_sched_insert_nops_str)
4861 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4862 rs6000_sched_insert_nops = sched_finish_none;
4863 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4864 rs6000_sched_insert_nops = sched_finish_pad_groups;
4865 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4866 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4867 else
4868 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4869 atoi (rs6000_sched_insert_nops_str));
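/* Usage examples, matching the string comparisons above: the options accept
   either a name, e.g. -msched-costly-dep=store_to_load or
   -minsert-sched-nops=pad, or a bare number such as -msched-costly-dep=2,
   which the atoi fallbacks above cast onto the respective enums.  */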
4872 /* Handle stack protector. */
4873 if (!global_options_set.x_rs6000_stack_protector_guard)
4874 #ifdef TARGET_THREAD_SSP_OFFSET
4875 rs6000_stack_protector_guard = SSP_TLS;
4876 #else
4877 rs6000_stack_protector_guard = SSP_GLOBAL;
4878 #endif
4880 #ifdef TARGET_THREAD_SSP_OFFSET
4881 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4882 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4883 #endif
4885 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4887 char *endp;
4888 const char *str = rs6000_stack_protector_guard_offset_str;
4890 errno = 0;
4891 long offset = strtol (str, &endp, 0);
4892 if (!*str || *endp || errno)
4893 error ("%qs is not a valid number in %qs", str,
4894 "-mstack-protector-guard-offset=");
4896 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4897 || (TARGET_64BIT && (offset & 3)))
4898 error ("%qs is not a valid offset in %qs", str,
4899 "-mstack-protector-guard-offset=");
4901 rs6000_stack_protector_guard_offset = offset;
4904 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4906 const char *str = rs6000_stack_protector_guard_reg_str;
4907 int reg = decode_reg_name (str);
4909 if (!IN_RANGE (reg, 1, 31))
4910 error ("%qs is not a valid base register in %qs", str,
4911 "-mstack-protector-guard-reg=");
4913 rs6000_stack_protector_guard_reg = reg;
4916 if (rs6000_stack_protector_guard == SSP_TLS
4917 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4918 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4920 if (global_init_p)
4922 #ifdef TARGET_REGNAMES
4923 /* If the user desires alternate register names, copy in the
4924 alternate names now. */
4925 if (TARGET_REGNAMES)
4926 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4927 #endif
4929 /* Set aix_struct_return last, after the ABI is determined.
4930 If -maix-struct-return or -msvr4-struct-return was explicitly
4931 used, don't override with the ABI default. */
4932 if (!global_options_set.x_aix_struct_return)
4933 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4935 #if 0
4936 /* IBM XL compiler defaults to unsigned bitfields. */
4937 if (TARGET_XL_COMPAT)
4938 flag_signed_bitfields = 0;
4939 #endif
4941 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4942 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4944 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4946 /* We can only guarantee the availability of DI pseudo-ops when
4947 assembling for 64-bit targets. */
4948 if (!TARGET_64BIT)
4950 targetm.asm_out.aligned_op.di = NULL;
4951 targetm.asm_out.unaligned_op.di = NULL;
4955 /* Set branch target alignment, if not optimizing for size. */
4956 if (!optimize_size)
4958 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
4959 8-byte aligned to avoid misprediction by the branch predictor. */
4960 if (rs6000_cpu == PROCESSOR_TITAN
4961 || rs6000_cpu == PROCESSOR_CELL)
4963 if (align_functions <= 0)
4964 align_functions = 8;
4965 if (align_jumps <= 0)
4966 align_jumps = 8;
4967 if (align_loops <= 0)
4968 align_loops = 8;
4970 if (rs6000_align_branch_targets)
4972 if (align_functions <= 0)
4973 align_functions = 16;
4974 if (align_jumps <= 0)
4975 align_jumps = 16;
4976 if (align_loops <= 0)
4978 can_override_loop_align = 1;
4979 align_loops = 16;
4982 if (align_jumps_max_skip <= 0)
4983 align_jumps_max_skip = 15;
4984 if (align_loops_max_skip <= 0)
4985 align_loops_max_skip = 15;
4988 /* Arrange to save and restore machine status around nested functions. */
4989 init_machine_status = rs6000_init_machine_status;
4991 /* We should always be splitting complex arguments, but we can't break
4992 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4993 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4994 targetm.calls.split_complex_arg = NULL;
4996 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4997 if (DEFAULT_ABI == ABI_AIX)
4998 targetm.calls.custom_function_descriptors = 0;
5001 /* Initialize rs6000_cost with the appropriate target costs. */
5002 if (optimize_size)
5003 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
5004 else
5005 switch (rs6000_cpu)
5007 case PROCESSOR_RS64A:
5008 rs6000_cost = &rs64a_cost;
5009 break;
5011 case PROCESSOR_MPCCORE:
5012 rs6000_cost = &mpccore_cost;
5013 break;
5015 case PROCESSOR_PPC403:
5016 rs6000_cost = &ppc403_cost;
5017 break;
5019 case PROCESSOR_PPC405:
5020 rs6000_cost = &ppc405_cost;
5021 break;
5023 case PROCESSOR_PPC440:
5024 rs6000_cost = &ppc440_cost;
5025 break;
5027 case PROCESSOR_PPC476:
5028 rs6000_cost = &ppc476_cost;
5029 break;
5031 case PROCESSOR_PPC601:
5032 rs6000_cost = &ppc601_cost;
5033 break;
5035 case PROCESSOR_PPC603:
5036 rs6000_cost = &ppc603_cost;
5037 break;
5039 case PROCESSOR_PPC604:
5040 rs6000_cost = &ppc604_cost;
5041 break;
5043 case PROCESSOR_PPC604e:
5044 rs6000_cost = &ppc604e_cost;
5045 break;
5047 case PROCESSOR_PPC620:
5048 rs6000_cost = &ppc620_cost;
5049 break;
5051 case PROCESSOR_PPC630:
5052 rs6000_cost = &ppc630_cost;
5053 break;
5055 case PROCESSOR_CELL:
5056 rs6000_cost = &ppccell_cost;
5057 break;
5059 case PROCESSOR_PPC750:
5060 case PROCESSOR_PPC7400:
5061 rs6000_cost = &ppc750_cost;
5062 break;
5064 case PROCESSOR_PPC7450:
5065 rs6000_cost = &ppc7450_cost;
5066 break;
5068 case PROCESSOR_PPC8540:
5069 case PROCESSOR_PPC8548:
5070 rs6000_cost = &ppc8540_cost;
5071 break;
5073 case PROCESSOR_PPCE300C2:
5074 case PROCESSOR_PPCE300C3:
5075 rs6000_cost = &ppce300c2c3_cost;
5076 break;
5078 case PROCESSOR_PPCE500MC:
5079 rs6000_cost = &ppce500mc_cost;
5080 break;
5082 case PROCESSOR_PPCE500MC64:
5083 rs6000_cost = &ppce500mc64_cost;
5084 break;
5086 case PROCESSOR_PPCE5500:
5087 rs6000_cost = &ppce5500_cost;
5088 break;
5090 case PROCESSOR_PPCE6500:
5091 rs6000_cost = &ppce6500_cost;
5092 break;
5094 case PROCESSOR_TITAN:
5095 rs6000_cost = &titan_cost;
5096 break;
5098 case PROCESSOR_POWER4:
5099 case PROCESSOR_POWER5:
5100 rs6000_cost = &power4_cost;
5101 break;
5103 case PROCESSOR_POWER6:
5104 rs6000_cost = &power6_cost;
5105 break;
5107 case PROCESSOR_POWER7:
5108 rs6000_cost = &power7_cost;
5109 break;
5111 case PROCESSOR_POWER8:
5112 rs6000_cost = &power8_cost;
5113 break;
5115 case PROCESSOR_POWER9:
5116 rs6000_cost = &power9_cost;
5117 break;
5119 case PROCESSOR_PPCA2:
5120 rs6000_cost = &ppca2_cost;
5121 break;
5123 default:
5124 gcc_unreachable ();
5127 if (global_init_p)
5129 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5130 rs6000_cost->simultaneous_prefetches,
5131 global_options.x_param_values,
5132 global_options_set.x_param_values);
5133 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5134 global_options.x_param_values,
5135 global_options_set.x_param_values);
5136 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5137 rs6000_cost->cache_line_size,
5138 global_options.x_param_values,
5139 global_options_set.x_param_values);
5140 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5141 global_options.x_param_values,
5142 global_options_set.x_param_values);
5144 /* Increase loop peeling limits based on performance analysis. */
5145 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5146 global_options.x_param_values,
5147 global_options_set.x_param_values);
5148 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5149 global_options.x_param_values,
5150 global_options_set.x_param_values);
5152 /* Use the 'model' -fsched-pressure algorithm by default. */
5153 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5154 SCHED_PRESSURE_MODEL,
5155 global_options.x_param_values,
5156 global_options_set.x_param_values);
5158 /* If using typedef char *va_list, signal that
5159 __builtin_va_start (&ap, 0) can be optimized to
5160 ap = __builtin_next_arg (0). */
5161 if (DEFAULT_ABI != ABI_V4)
5162 targetm.expand_builtin_va_start = NULL;
5165 /* Set up single/double float flags.
5166 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
5167 then set both flags. */
5168 if (TARGET_HARD_FLOAT && rs6000_single_float == 0 && rs6000_double_float == 0)
5169 rs6000_single_float = rs6000_double_float = 1;
5171 /* If not explicitly specified via option, decide whether to generate indexed
5172 load/store instructions. A value of -1 indicates that the
5173 initial value of this variable has not been overwritten. During
5174 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
5175 if (TARGET_AVOID_XFORM == -1)
5176 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5177 DERAT mispredict penalty. However the LVE and STVE altivec instructions
5178 need indexed accesses and the type used is the scalar type of the element
5179 being loaded or stored. */
5180 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
5181 && !TARGET_ALTIVEC);
5183 /* Set the -mrecip options. */
5184 if (rs6000_recip_name)
5186 char *p = ASTRDUP (rs6000_recip_name);
5187 char *q;
5188 unsigned int mask, i;
5189 bool invert;
5191 while ((q = strtok (p, ",")) != NULL)
5193 p = NULL;
5194 if (*q == '!')
5196 invert = true;
5197 q++;
5199 else
5200 invert = false;
5202 if (!strcmp (q, "default"))
5203 mask = ((TARGET_RECIP_PRECISION)
5204 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5205 else
5207 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5208 if (!strcmp (q, recip_options[i].string))
5210 mask = recip_options[i].mask;
5211 break;
5214 if (i == ARRAY_SIZE (recip_options))
5216 error ("unknown option for %<%s=%s%>", "-mrecip", q);
5217 invert = false;
5218 mask = 0;
5219 ret = false;
5223 if (invert)
5224 rs6000_recip_control &= ~mask;
5225 else
5226 rs6000_recip_control |= mask;
5230 /* Set the builtin mask of the various options used that could affect which
5231 builtins were used. In the past we used target_flags, but we've run out
5232 of bits, and some options like PAIRED are no longer in target_flags. */
5233 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5234 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5235 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5236 rs6000_builtin_mask);
5238 /* Initialize all of the registers. */
5239 rs6000_init_hard_regno_mode_ok (global_init_p);
5241 /* Save the initial options in case the user does function-specific options.  */
5242 if (global_init_p)
5243 target_option_default_node = target_option_current_node
5244 = build_target_option_node (&global_options);
5246 /* If not explicitly specified via option, decide whether to generate the
5247 extra blr's required to preserve the link stack on some cpus (e.g., the 476).  */
5248 if (TARGET_LINK_STACK == -1)
5249 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
5251 return ret;
5254 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5255 define the target cpu type. */
5257 static void
5258 rs6000_option_override (void)
5260 (void) rs6000_option_override_internal (true);
5264 /* Implement targetm.vectorize.builtin_mask_for_load. */
5265 static tree
5266 rs6000_builtin_mask_for_load (void)
5268 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5269 if ((TARGET_ALTIVEC && !TARGET_VSX)
5270 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5271 return altivec_builtin_mask_for_load;
5272 else
5273 return 0;
5276 /* Implement LOOP_ALIGN. */
5277 int
5278 rs6000_loop_align (rtx label)
5280 basic_block bb;
5281 int ninsns;
5283 /* Don't override loop alignment if -falign-loops was specified. */
5284 if (!can_override_loop_align)
5285 return align_loops_log;
5287 bb = BLOCK_FOR_INSN (label);
5288 ninsns = num_loop_insns (bb->loop_father);
5290 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
5291 if (ninsns > 4 && ninsns <= 8
5292 && (rs6000_cpu == PROCESSOR_POWER4
5293 || rs6000_cpu == PROCESSOR_POWER5
5294 || rs6000_cpu == PROCESSOR_POWER6
5295 || rs6000_cpu == PROCESSOR_POWER7
5296 || rs6000_cpu == PROCESSOR_POWER8
5297 || rs6000_cpu == PROCESSOR_POWER9))
5298 return 5;
5299 else
5300 return align_loops_log;
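/* Worked example (editorial): returning 5 requests 2**5 = 32-byte
   alignment, so a loop body of 5-8 instructions (20-32 bytes) fits in a
   single 32-byte icache sector on the POWER4..POWER9 cores listed above;
   other loops keep the -falign-loops default.  */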
5303 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5304 static int
5305 rs6000_loop_align_max_skip (rtx_insn *label)
5307 return (1 << rs6000_loop_align (label)) - 1;
5310 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5311 after applying N iterations.  This routine does not determine
5312 how many iterations are required to reach the desired alignment.  */
5314 static bool
5315 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5317 if (is_packed)
5318 return false;
5320 if (TARGET_32BIT)
5322 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5323 return true;
5325 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5326 return true;
5328 return false;
5330 else
5332 if (TARGET_MACHO)
5333 return false;
5335 /* Assume that all other types are naturally aligned.  CHECKME!  */
5336 return true;
5340 /* Return true if the vector misalignment factor is supported by the
5341 target. */
5342 static bool
5343 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5344 const_tree type,
5345 int misalignment,
5346 bool is_packed)
5348 if (TARGET_VSX)
5350 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5351 return true;
5353 /* Return if movmisalign pattern is not supported for this mode. */
5354 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5355 return false;
5357 if (misalignment == -1)
5359 /* Misalignment factor is unknown at compile time but we know
5360 it's word aligned. */
5361 if (rs6000_vector_alignment_reachable (type, is_packed))
5363 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5365 if (element_size == 64 || element_size == 32)
5366 return true;
5369 return false;
5372 /* VSX supports word-aligned vector. */
5373 if (misalignment % 4 == 0)
5374 return true;
5376 return false;
5379 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5380 static int
5381 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5382 tree vectype, int misalign)
5384 unsigned elements;
5385 tree elem_type;
5387 switch (type_of_cost)
5389 case scalar_stmt:
5390 case scalar_load:
5391 case scalar_store:
5392 case vector_stmt:
5393 case vector_load:
5394 case vector_store:
5395 case vec_to_scalar:
5396 case scalar_to_vec:
5397 case cond_branch_not_taken:
5398 return 1;
5400 case vec_perm:
5401 if (TARGET_VSX)
5402 return 3;
5403 else
5404 return 1;
5406 case vec_promote_demote:
5407 if (TARGET_VSX)
5408 return 4;
5409 else
5410 return 1;
5412 case cond_branch_taken:
5413 return 3;
5415 case unaligned_load:
5416 if (TARGET_P9_VECTOR)
5417 return 3;
5419 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5420 return 1;
5422 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5424 elements = TYPE_VECTOR_SUBPARTS (vectype);
5425 if (elements == 2)
5426 /* Double word aligned. */
5427 return 2;
5429 if (elements == 4)
5431 switch (misalign)
5433 case 8:
5434 /* Double word aligned. */
5435 return 2;
5437 case -1:
5438 /* Unknown misalignment. */
5439 case 4:
5440 case 12:
5441 /* Word aligned. */
5442 return 22;
5444 default:
5445 gcc_unreachable ();
5450 if (TARGET_ALTIVEC)
5451 /* Misaligned loads are not supported. */
5452 gcc_unreachable ();
5454 return 2;
5456 case unaligned_store:
5457 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5458 return 1;
5460 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5462 elements = TYPE_VECTOR_SUBPARTS (vectype);
5463 if (elements == 2)
5464 /* Double word aligned. */
5465 return 2;
5467 if (elements == 4)
5469 switch (misalign)
5471 case 8:
5472 /* Double word aligned. */
5473 return 2;
5475 case -1:
5476 /* Unknown misalignment. */
5477 case 4:
5478 case 12:
5479 /* Word aligned. */
5480 return 23;
5482 default:
5483 gcc_unreachable ();
5488 if (TARGET_ALTIVEC)
5489 /* Misaligned stores are not supported. */
5490 gcc_unreachable ();
5492 return 2;
5494 case vec_construct:
5495 /* This is a rough approximation assuming non-constant elements
5496 constructed into a vector via element insertion. FIXME:
5497 vec_construct is not granular enough for uniformly good
5498 decisions. If the initialization is a splat, this is
5499 cheaper than we estimate. Improve this someday. */
5500 elem_type = TREE_TYPE (vectype);
5501 /* 32-bit vectors loaded into registers are stored as double
5502 precision, so we need 2 permutes, 2 converts, and 1 merge
5503 to construct a vector of short floats from them. */
5504 if (SCALAR_FLOAT_TYPE_P (elem_type)
5505 && TYPE_PRECISION (elem_type) == 32)
5506 return 5;
5507 /* On POWER9, integer vector types are built up in GPRs and then
5508 use a direct move (2 cycles). For POWER8 this is even worse,
5509 as we need two direct moves and a merge, and the direct moves
5510 are five cycles. */
5511 else if (INTEGRAL_TYPE_P (elem_type))
5513 if (TARGET_P9_VECTOR)
5514 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5515 else
5516 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5518 else
5519 /* V2DFmode doesn't need a direct move. */
5520 return 2;
5522 default:
5523 gcc_unreachable ();
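/* Worked example (editorial): a vec_construct of V4SImode from four
   non-constant elements has TYPE_VECTOR_SUBPARTS == 4, so the estimate is
   4 - 1 + 2 = 5 on POWER9 (one cheap direct move) versus 4 - 1 + 5 = 8 on
   earlier VSX cpus (two slow direct moves plus a merge).  */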
5527 /* Implement targetm.vectorize.preferred_simd_mode. */
5529 static machine_mode
5530 rs6000_preferred_simd_mode (scalar_mode mode)
5532 if (TARGET_VSX)
5533 switch (mode)
5535 case E_DFmode:
5536 return V2DFmode;
5537 default:;
5539 if (TARGET_ALTIVEC || TARGET_VSX)
5540 switch (mode)
5542 case E_SFmode:
5543 return V4SFmode;
5544 case E_TImode:
5545 return V1TImode;
5546 case E_DImode:
5547 return V2DImode;
5548 case E_SImode:
5549 return V4SImode;
5550 case E_HImode:
5551 return V8HImode;
5552 case E_QImode:
5553 return V16QImode;
5554 default:;
5556 if (TARGET_PAIRED_FLOAT
5557 && mode == SFmode)
5558 return V2SFmode;
5559 return word_mode;
5562 typedef struct _rs6000_cost_data
5564 struct loop *loop_info;
5565 unsigned cost[3];
5566 } rs6000_cost_data;
5568 /* Test for likely overcommitment of vector hardware resources. If a
5569 loop iteration is relatively large, and too large a percentage of
5570 instructions in the loop are vectorized, the cost model may not
5571 adequately reflect delays from unavailable vector resources.
5572 Penalize the loop body cost for this case. */
5574 static void
5575 rs6000_density_test (rs6000_cost_data *data)
5577 const int DENSITY_PCT_THRESHOLD = 85;
5578 const int DENSITY_SIZE_THRESHOLD = 70;
5579 const int DENSITY_PENALTY = 10;
5580 struct loop *loop = data->loop_info;
5581 basic_block *bbs = get_loop_body (loop);
5582 int nbbs = loop->num_nodes;
5583 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5584 int i, density_pct;
5586 for (i = 0; i < nbbs; i++)
5588 basic_block bb = bbs[i];
5589 gimple_stmt_iterator gsi;
5591 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5593 gimple *stmt = gsi_stmt (gsi);
5594 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5596 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5597 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5598 not_vec_cost++;
5602 free (bbs);
5603 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5605 if (density_pct > DENSITY_PCT_THRESHOLD
5606 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5608 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5609 if (dump_enabled_p ())
5610 dump_printf_loc (MSG_NOTE, vect_location,
5611 "density %d%%, cost %d exceeds threshold, penalizing "
5612 "loop body cost by %d%%", density_pct,
5613 vec_cost + not_vec_cost, DENSITY_PENALTY);
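/* Worked example (editorial, hypothetical costs): with vec_cost == 90 and
   not_vec_cost == 10, density_pct == 90 * 100 / 100 == 90 > 85 and the
   body size 100 > 70, so the body cost is inflated by 10% to
   90 * 110 / 100 == 99.  */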
5617 /* Implement targetm.vectorize.init_cost. */
5619 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5620 instruction is needed by the vectorization. */
5621 static bool rs6000_vect_nonmem;
5623 static void *
5624 rs6000_init_cost (struct loop *loop_info)
5626 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5627 data->loop_info = loop_info;
5628 data->cost[vect_prologue] = 0;
5629 data->cost[vect_body] = 0;
5630 data->cost[vect_epilogue] = 0;
5631 rs6000_vect_nonmem = false;
5632 return data;
5635 /* Implement targetm.vectorize.add_stmt_cost. */
5637 static unsigned
5638 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5639 struct _stmt_vec_info *stmt_info, int misalign,
5640 enum vect_cost_model_location where)
5642 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5643 unsigned retval = 0;
5645 if (flag_vect_cost_model)
5647 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5648 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5649 misalign);
5650 /* Statements in an inner loop relative to the loop being
5651 vectorized are weighted more heavily. The value here is
5652 arbitrary and could potentially be improved with analysis. */
5653 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5654 count *= 50; /* FIXME. */
5656 retval = (unsigned) (count * stmt_cost);
5657 cost_data->cost[where] += retval;
5659 /* Check whether we're doing something other than just a copy loop.
5660 Not all such loops may be profitably vectorized; see
5661 rs6000_finish_cost. */
5662 if ((kind == vec_to_scalar || kind == vec_perm
5663 || kind == vec_promote_demote || kind == vec_construct
5664 || kind == scalar_to_vec)
5665 || (where == vect_body && kind == vector_stmt))
5666 rs6000_vect_nonmem = true;
5669 return retval;
5672 /* Implement targetm.vectorize.finish_cost. */
5674 static void
5675 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5676 unsigned *body_cost, unsigned *epilogue_cost)
5678 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5680 if (cost_data->loop_info)
5681 rs6000_density_test (cost_data);
5683 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5684 that require versioning for any reason. The vectorization is at
5685 best a wash inside the loop, and the versioning checks make
5686 profitability highly unlikely and potentially quite harmful. */
5687 if (cost_data->loop_info)
5689 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5690 if (!rs6000_vect_nonmem
5691 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5692 && LOOP_REQUIRES_VERSIONING (vec_info))
5693 cost_data->cost[vect_body] += 10000;
5696 *prologue_cost = cost_data->cost[vect_prologue];
5697 *body_cost = cost_data->cost[vect_body];
5698 *epilogue_cost = cost_data->cost[vect_epilogue];
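/* Editorial note: the +10000 penalty above effectively vetoes
   vectorization of a pure copy loop -- one that recorded no non-memory
   vector operation, has a vectorization factor of 2, and would need
   runtime versioning checks (e.g. a memcpy-style loop behind an alias
   check).  */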
5701 /* Implement targetm.vectorize.destroy_cost_data. */
5703 static void
5704 rs6000_destroy_cost_data (void *data)
5706 free (data);
5709 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5710 library with vectorized intrinsics. */
5712 static tree
5713 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5714 tree type_in)
5716 char name[32];
5717 const char *suffix = NULL;
5718 tree fntype, new_fndecl, bdecl = NULL_TREE;
5719 int n_args = 1;
5720 const char *bname;
5721 machine_mode el_mode, in_mode;
5722 int n, in_n;
5724 /* Libmass is suitable for unsafe math only as it does not correctly support
5725 parts of IEEE with the required precision such as denormals. Only support
5726 it if we have VSX to use the simd d2 or f4 functions.
5727 XXX: Add variable length support. */
5728 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5729 return NULL_TREE;
5731 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5732 n = TYPE_VECTOR_SUBPARTS (type_out);
5733 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5734 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5735 if (el_mode != in_mode
5736 || n != in_n)
5737 return NULL_TREE;
5739 switch (fn)
5741 CASE_CFN_ATAN2:
5742 CASE_CFN_HYPOT:
5743 CASE_CFN_POW:
5744 n_args = 2;
5745 gcc_fallthrough ();
5747 CASE_CFN_ACOS:
5748 CASE_CFN_ACOSH:
5749 CASE_CFN_ASIN:
5750 CASE_CFN_ASINH:
5751 CASE_CFN_ATAN:
5752 CASE_CFN_ATANH:
5753 CASE_CFN_CBRT:
5754 CASE_CFN_COS:
5755 CASE_CFN_COSH:
5756 CASE_CFN_ERF:
5757 CASE_CFN_ERFC:
5758 CASE_CFN_EXP2:
5759 CASE_CFN_EXP:
5760 CASE_CFN_EXPM1:
5761 CASE_CFN_LGAMMA:
5762 CASE_CFN_LOG10:
5763 CASE_CFN_LOG1P:
5764 CASE_CFN_LOG2:
5765 CASE_CFN_LOG:
5766 CASE_CFN_SIN:
5767 CASE_CFN_SINH:
5768 CASE_CFN_SQRT:
5769 CASE_CFN_TAN:
5770 CASE_CFN_TANH:
5771 if (el_mode == DFmode && n == 2)
5773 bdecl = mathfn_built_in (double_type_node, fn);
5774 suffix = "d2"; /* pow -> powd2 */
5776 else if (el_mode == SFmode && n == 4)
5778 bdecl = mathfn_built_in (float_type_node, fn);
5779 suffix = "4"; /* powf -> powf4 */
5781 else
5782 return NULL_TREE;
5783 if (!bdecl)
5784 return NULL_TREE;
5785 break;
5787 default:
5788 return NULL_TREE;
5791 gcc_assert (suffix != NULL);
5792 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5793 if (!bname)
5794 return NULL_TREE;
5796 strcpy (name, bname + sizeof ("__builtin_") - 1);
5797 strcat (name, suffix);
5799 if (n_args == 1)
5800 fntype = build_function_type_list (type_out, type_in, NULL);
5801 else if (n_args == 2)
5802 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5803 else
5804 gcc_unreachable ();
5806 /* Build a function declaration for the vectorized function. */
5807 new_fndecl = build_decl (BUILTINS_LOCATION,
5808 FUNCTION_DECL, get_identifier (name), fntype);
5809 TREE_PUBLIC (new_fndecl) = 1;
5810 DECL_EXTERNAL (new_fndecl) = 1;
5811 DECL_IS_NOVOPS (new_fndecl) = 1;
5812 TREE_READONLY (new_fndecl) = 1;
5814 return new_fndecl;
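/* Worked example (editorial): vectorizing cos over V2DFmode finds
   bdecl == __builtin_cos, strips the "__builtin_" prefix and appends
   "d2", declaring an external "cosd2" that takes and returns V2DF; the
   SFmode/V4SF case maps "__builtin_cosf" to "cosf4" the same way.  */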
5817 /* Returns a function decl for a vectorized version of the builtin function
5818 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5819 if it is not available. */
5821 static tree
5822 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5823 tree type_in)
5825 machine_mode in_mode, out_mode;
5826 int in_n, out_n;
5828 if (TARGET_DEBUG_BUILTIN)
5829 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5830 combined_fn_name (combined_fn (fn)),
5831 GET_MODE_NAME (TYPE_MODE (type_out)),
5832 GET_MODE_NAME (TYPE_MODE (type_in)));
5834 if (TREE_CODE (type_out) != VECTOR_TYPE
5835 || TREE_CODE (type_in) != VECTOR_TYPE)
5836 return NULL_TREE;
5838 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5839 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5840 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5841 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5843 switch (fn)
5845 CASE_CFN_COPYSIGN:
5846 if (VECTOR_UNIT_VSX_P (V2DFmode)
5847 && out_mode == DFmode && out_n == 2
5848 && in_mode == DFmode && in_n == 2)
5849 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5850 if (VECTOR_UNIT_VSX_P (V4SFmode)
5851 && out_mode == SFmode && out_n == 4
5852 && in_mode == SFmode && in_n == 4)
5853 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5854 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5855 && out_mode == SFmode && out_n == 4
5856 && in_mode == SFmode && in_n == 4)
5857 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5858 break;
5859 CASE_CFN_CEIL:
5860 if (VECTOR_UNIT_VSX_P (V2DFmode)
5861 && out_mode == DFmode && out_n == 2
5862 && in_mode == DFmode && in_n == 2)
5863 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5864 if (VECTOR_UNIT_VSX_P (V4SFmode)
5865 && out_mode == SFmode && out_n == 4
5866 && in_mode == SFmode && in_n == 4)
5867 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5868 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5869 && out_mode == SFmode && out_n == 4
5870 && in_mode == SFmode && in_n == 4)
5871 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5872 break;
5873 CASE_CFN_FLOOR:
5874 if (VECTOR_UNIT_VSX_P (V2DFmode)
5875 && out_mode == DFmode && out_n == 2
5876 && in_mode == DFmode && in_n == 2)
5877 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5878 if (VECTOR_UNIT_VSX_P (V4SFmode)
5879 && out_mode == SFmode && out_n == 4
5880 && in_mode == SFmode && in_n == 4)
5881 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5882 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5883 && out_mode == SFmode && out_n == 4
5884 && in_mode == SFmode && in_n == 4)
5885 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5886 break;
5887 CASE_CFN_FMA:
5888 if (VECTOR_UNIT_VSX_P (V2DFmode)
5889 && out_mode == DFmode && out_n == 2
5890 && in_mode == DFmode && in_n == 2)
5891 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5892 if (VECTOR_UNIT_VSX_P (V4SFmode)
5893 && out_mode == SFmode && out_n == 4
5894 && in_mode == SFmode && in_n == 4)
5895 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5896 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5897 && out_mode == SFmode && out_n == 4
5898 && in_mode == SFmode && in_n == 4)
5899 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5900 break;
5901 CASE_CFN_TRUNC:
5902 if (VECTOR_UNIT_VSX_P (V2DFmode)
5903 && out_mode == DFmode && out_n == 2
5904 && in_mode == DFmode && in_n == 2)
5905 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5906 if (VECTOR_UNIT_VSX_P (V4SFmode)
5907 && out_mode == SFmode && out_n == 4
5908 && in_mode == SFmode && in_n == 4)
5909 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5910 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5911 && out_mode == SFmode && out_n == 4
5912 && in_mode == SFmode && in_n == 4)
5913 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5914 break;
5915 CASE_CFN_NEARBYINT:
5916 if (VECTOR_UNIT_VSX_P (V2DFmode)
5917 && flag_unsafe_math_optimizations
5918 && out_mode == DFmode && out_n == 2
5919 && in_mode == DFmode && in_n == 2)
5920 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5921 if (VECTOR_UNIT_VSX_P (V4SFmode)
5922 && flag_unsafe_math_optimizations
5923 && out_mode == SFmode && out_n == 4
5924 && in_mode == SFmode && in_n == 4)
5925 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5926 break;
5927 CASE_CFN_RINT:
5928 if (VECTOR_UNIT_VSX_P (V2DFmode)
5929 && !flag_trapping_math
5930 && out_mode == DFmode && out_n == 2
5931 && in_mode == DFmode && in_n == 2)
5932 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5933 if (VECTOR_UNIT_VSX_P (V4SFmode)
5934 && !flag_trapping_math
5935 && out_mode == SFmode && out_n == 4
5936 && in_mode == SFmode && in_n == 4)
5937 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5938 break;
5939 default:
5940 break;
5943 /* Generate calls to libmass if appropriate. */
5944 if (rs6000_veclib_handler)
5945 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5947 return NULL_TREE;
5950 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5952 static tree
5953 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5954 tree type_in)
5956 machine_mode in_mode, out_mode;
5957 int in_n, out_n;
5959 if (TARGET_DEBUG_BUILTIN)
5960 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5961 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5962 GET_MODE_NAME (TYPE_MODE (type_out)),
5963 GET_MODE_NAME (TYPE_MODE (type_in)));
5965 if (TREE_CODE (type_out) != VECTOR_TYPE
5966 || TREE_CODE (type_in) != VECTOR_TYPE)
5967 return NULL_TREE;
5969 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5970 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5971 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5972 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5974 enum rs6000_builtins fn
5975 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5976 switch (fn)
5978 case RS6000_BUILTIN_RSQRTF:
5979 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5980 && out_mode == SFmode && out_n == 4
5981 && in_mode == SFmode && in_n == 4)
5982 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5983 break;
5984 case RS6000_BUILTIN_RSQRT:
5985 if (VECTOR_UNIT_VSX_P (V2DFmode)
5986 && out_mode == DFmode && out_n == 2
5987 && in_mode == DFmode && in_n == 2)
5988 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5989 break;
5990 case RS6000_BUILTIN_RECIPF:
5991 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5992 && out_mode == SFmode && out_n == 4
5993 && in_mode == SFmode && in_n == 4)
5994 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5995 break;
5996 case RS6000_BUILTIN_RECIP:
5997 if (VECTOR_UNIT_VSX_P (V2DFmode)
5998 && out_mode == DFmode && out_n == 2
5999 && in_mode == DFmode && in_n == 2)
6000 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
6001 break;
6002 default:
6003 break;
6005 return NULL_TREE;
6008 /* Default CPU string for rs6000*_file_start functions. */
6009 static const char *rs6000_default_cpu;
6011 /* Do anything needed at the start of the asm file. */
6013 static void
6014 rs6000_file_start (void)
6016 char buffer[80];
6017 const char *start = buffer;
6018 FILE *file = asm_out_file;
6020 rs6000_default_cpu = TARGET_CPU_DEFAULT;
6022 default_file_start ();
6024 if (flag_verbose_asm)
6026 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
6028 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6030 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6031 start = "";
6034 if (global_options_set.x_rs6000_cpu_index)
6036 fprintf (file, "%s -mcpu=%s", start,
6037 processor_target_table[rs6000_cpu_index].name);
6038 start = "";
6041 if (global_options_set.x_rs6000_tune_index)
6043 fprintf (file, "%s -mtune=%s", start,
6044 processor_target_table[rs6000_tune_index].name);
6045 start = "";
6048 if (PPC405_ERRATUM77)
6050 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6051 start = "";
6054 #ifdef USING_ELFOS_H
6055 switch (rs6000_sdata)
6057 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6058 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6059 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6060 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6063 if (rs6000_sdata && g_switch_value)
6065 fprintf (file, "%s -G %d", start,
6066 g_switch_value);
6067 start = "";
6069 #endif
6071 if (*start == '\0')
6072 putc ('\n', file);
6075 #ifdef USING_ELFOS_H
6076 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6077 && !global_options_set.x_rs6000_cpu_index)
6079 fputs ("\t.machine ", asm_out_file);
6080 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6081 fputs ("power9\n", asm_out_file);
6082 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6083 fputs ("power8\n", asm_out_file);
6084 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6085 fputs ("power7\n", asm_out_file);
6086 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6087 fputs ("power6\n", asm_out_file);
6088 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6089 fputs ("power5\n", asm_out_file);
6090 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6091 fputs ("power4\n", asm_out_file);
6092 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6093 fputs ("ppc64\n", asm_out_file);
6094 else
6095 fputs ("ppc\n", asm_out_file);
6097 #endif
6099 if (DEFAULT_ABI == ABI_ELFv2)
6100 fprintf (file, "\t.abiversion 2\n");
6104 /* Return nonzero if this function is known to have a null epilogue. */
6106 int
6107 direct_return (void)
6109 if (reload_completed)
6111 rs6000_stack_t *info = rs6000_stack_info ();
6113 if (info->first_gp_reg_save == 32
6114 && info->first_fp_reg_save == 64
6115 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6116 && ! info->lr_save_p
6117 && ! info->cr_save_p
6118 && info->vrsave_size == 0
6119 && ! info->push_p)
6120 return 1;
6123 return 0;
6126 /* Return the number of instructions it takes to form a constant in an
6127 integer register. */
6129 static int
6130 num_insns_constant_wide (HOST_WIDE_INT value)
6132 /* signed constant loadable with addi */
6133 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6134 return 1;
6136 /* constant loadable with addis */
6137 else if ((value & 0xffff) == 0
6138 && (value >> 31 == -1 || value >> 31 == 0))
6139 return 1;
6141 else if (TARGET_POWERPC64)
6143 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6144 HOST_WIDE_INT high = value >> 31;
6146 if (high == 0 || high == -1)
6147 return 2;
6149 high >>= 1;
6151 if (low == 0)
6152 return num_insns_constant_wide (high) + 1;
6153 else if (high == 0)
6154 return num_insns_constant_wide (low) + 1;
6155 else
6156 return (num_insns_constant_wide (high)
6157 + num_insns_constant_wide (low) + 1);
6160 else
6161 return 2;
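/* Worked examples (editorial): 0x7fff loads in one addi; 0x12340000 in
   one addis (low 16 bits zero, sign-extends from bit 31); 0x12345678
   needs addis + ori, i.e. 2; and a full 64-bit constant such as
   0x123456789abcdef0 costs 2 (high half) + 2 (low half) + 1 (shift and
   combine) = 5.  */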
6164 int
6165 num_insns_constant (rtx op, machine_mode mode)
6167 HOST_WIDE_INT low, high;
6169 switch (GET_CODE (op))
6171 case CONST_INT:
6172 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6173 && rs6000_is_valid_and_mask (op, mode))
6174 return 2;
6175 else
6176 return num_insns_constant_wide (INTVAL (op));
6178 case CONST_WIDE_INT:
6180 int i;
6181 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6182 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6183 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6184 return ins;
6187 case CONST_DOUBLE:
6188 if (mode == SFmode || mode == SDmode)
6190 long l;
6192 if (DECIMAL_FLOAT_MODE_P (mode))
6193 REAL_VALUE_TO_TARGET_DECIMAL32
6194 (*CONST_DOUBLE_REAL_VALUE (op), l);
6195 else
6196 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6197 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6200 long l[2];
6201 if (DECIMAL_FLOAT_MODE_P (mode))
6202 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6203 else
6204 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6205 high = l[WORDS_BIG_ENDIAN == 0];
6206 low = l[WORDS_BIG_ENDIAN != 0];
6208 if (TARGET_32BIT)
6209 return (num_insns_constant_wide (low)
6210 + num_insns_constant_wide (high));
6211 else
6213 if ((high == 0 && low >= 0)
6214 || (high == -1 && low < 0))
6215 return num_insns_constant_wide (low);
6217 else if (rs6000_is_valid_and_mask (op, mode))
6218 return 2;
6220 else if (low == 0)
6221 return num_insns_constant_wide (high) + 1;
6223 else
6224 return (num_insns_constant_wide (high)
6225 + num_insns_constant_wide (low) + 1);
6228 default:
6229 gcc_unreachable ();
6233 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6234 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6235 corresponding element of the vector, but for V4SFmode and V2SFmode,
6236 the corresponding "float" is interpreted as an SImode integer. */
6238 HOST_WIDE_INT
6239 const_vector_elt_as_int (rtx op, unsigned int elt)
6241 rtx tmp;
6243 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6244 gcc_assert (GET_MODE (op) != V2DImode
6245 && GET_MODE (op) != V2DFmode);
6247 tmp = CONST_VECTOR_ELT (op, elt);
6248 if (GET_MODE (op) == V4SFmode
6249 || GET_MODE (op) == V2SFmode)
6250 tmp = gen_lowpart (SImode, tmp);
6251 return INTVAL (tmp);
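/* Worked example (editorial): for a V4SFmode element holding 1.0f, the
   gen_lowpart to SImode yields the IEEE single-precision bit pattern, so
   this returns 0x3f800000.  */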
6254 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6255 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6256 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6257 all items are set to the same value and contain COPIES replicas of the
6258 vsplt's operand; if STEP > 1, one in every STEP elements is set to the vsplt's
6259 operand and the others are set to the value of the operand's msb. */
6261 static bool
6262 vspltis_constant (rtx op, unsigned step, unsigned copies)
6264 machine_mode mode = GET_MODE (op);
6265 machine_mode inner = GET_MODE_INNER (mode);
6267 unsigned i;
6268 unsigned nunits;
6269 unsigned bitsize;
6270 unsigned mask;
6272 HOST_WIDE_INT val;
6273 HOST_WIDE_INT splat_val;
6274 HOST_WIDE_INT msb_val;
6276 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6277 return false;
6279 nunits = GET_MODE_NUNITS (mode);
6280 bitsize = GET_MODE_BITSIZE (inner);
6281 mask = GET_MODE_MASK (inner);
6283 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6284 splat_val = val;
6285 msb_val = val >= 0 ? 0 : -1;
6287 /* Construct the value to be splatted, if possible. If not, return 0. */
6288 for (i = 2; i <= copies; i *= 2)
6290 HOST_WIDE_INT small_val;
6291 bitsize /= 2;
6292 small_val = splat_val >> bitsize;
6293 mask >>= bitsize;
6294 if (splat_val != ((HOST_WIDE_INT)
6295 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6296 | (small_val & mask)))
6297 return false;
6298 splat_val = small_val;
6301 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6302 if (EASY_VECTOR_15 (splat_val))
6303 ;
6305 /* Also check if we can splat, and then add the result to itself.  Do so if
6306 the value is positive, or if the splat instruction is using OP's mode;
6307 for splat_val < 0, the splat and the add should use the same mode.  */
6308 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6309 && (splat_val >= 0 || (step == 1 && copies == 1)))
6310 ;
6312 /* Also check if we are loading up the most significant bit, which can be
6313 done by loading up -1 and shifting the value left by -1.  */
6314 else if (EASY_VECTOR_MSB (splat_val, inner))
6315 ;
6317 else
6318 return false;
6320 /* Check if VAL is present in every STEP-th element, and the
6321 other elements are filled with its most significant bit. */
6322 for (i = 1; i < nunits; ++i)
6324 HOST_WIDE_INT desired_val;
6325 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6326 if ((i & (step - 1)) == 0)
6327 desired_val = val;
6328 else
6329 desired_val = msb_val;
6331 if (desired_val != const_vector_elt_as_int (op, elt))
6332 return false;
6335 return true;
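/* Worked example (editorial): a V8HImode constant with every element
   0x0505 is not a vspltish immediate (0x0505 > 15), but with copies == 2
   each halfword is two replicas of the byte 5, so it is matched as a
   vspltisb of 5 reinterpreted as V8HI.  */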
6338 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6339 instruction, filling in the bottom elements with 0 or -1.
6341 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6342 for the number of zeroes to shift in, or negative for the number of 0xff
6343 bytes to shift in.
6345 OP is a CONST_VECTOR. */
6347 int
6348 vspltis_shifted (rtx op)
6350 machine_mode mode = GET_MODE (op);
6351 machine_mode inner = GET_MODE_INNER (mode);
6353 unsigned i, j;
6354 unsigned nunits;
6355 unsigned mask;
6357 HOST_WIDE_INT val;
6359 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6360 return false;
6362 /* We need to create pseudo registers to do the shift, so don't recognize
6363 shift vector constants after reload. */
6364 if (!can_create_pseudo_p ())
6365 return false;
6367 nunits = GET_MODE_NUNITS (mode);
6368 mask = GET_MODE_MASK (inner);
6370 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6372 /* Check if the value can really be the operand of a vspltis[bhw]. */
6373 if (EASY_VECTOR_15 (val))
6374 ;
6376 /* Also check if we are loading up the most significant bit, which can be
6377 done by loading up -1 and shifting the value left by -1.  */
6378 else if (EASY_VECTOR_MSB (val, inner))
6379 ;
6381 else
6382 return 0;
6384 /* Check if VAL is present in every element until we find elements
6385 that are 0 or all 1 bits.  */
6386 for (i = 1; i < nunits; ++i)
6388 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6389 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6391 /* If the value isn't the splat value, check for the remaining elements
6392 being 0/-1. */
6393 if (val != elt_val)
6395 if (elt_val == 0)
6397 for (j = i+1; j < nunits; ++j)
6399 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6400 if (const_vector_elt_as_int (op, elt2) != 0)
6401 return 0;
6404 return (nunits - i) * GET_MODE_SIZE (inner);
6407 else if ((elt_val & mask) == mask)
6409 for (j = i+1; j < nunits; ++j)
6411 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6412 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6413 return 0;
6416 return -((nunits - i) * GET_MODE_SIZE (inner));
6419 else
6420 return 0;
6424 /* If all elements are equal, we don't need to do VSLDOI.  */
6425 return 0;
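/* Worked example (editorial): the big-endian V16QImode constant
   { 5, 0, 0, ..., 0 } splats 5 into the leading byte and sees zeros from
   element 1 onward, so the return value is (16 - 1) * 1 = 15 zero bytes
   to shift in after a vspltisb of 5.  */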
6429 /* Return true if OP is of the given MODE and can be synthesized
6430 with a vspltisb, vspltish or vspltisw. */
6432 bool
6433 easy_altivec_constant (rtx op, machine_mode mode)
6435 unsigned step, copies;
6437 if (mode == VOIDmode)
6438 mode = GET_MODE (op);
6439 else if (mode != GET_MODE (op))
6440 return false;
6442 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6443 constants. */
6444 if (mode == V2DFmode)
6445 return zero_constant (op, mode);
6447 else if (mode == V2DImode)
6449 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6450 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6451 return false;
6453 if (zero_constant (op, mode))
6454 return true;
6456 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6457 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6458 return true;
6460 return false;
6463 /* V1TImode is a special container for TImode. Ignore for now. */
6464 else if (mode == V1TImode)
6465 return false;
6467 /* Start with a vspltisw. */
6468 step = GET_MODE_NUNITS (mode) / 4;
6469 copies = 1;
6471 if (vspltis_constant (op, step, copies))
6472 return true;
6474 /* Then try with a vspltish. */
6475 if (step == 1)
6476 copies <<= 1;
6477 else
6478 step >>= 1;
6480 if (vspltis_constant (op, step, copies))
6481 return true;
6483 /* And finally a vspltisb. */
6484 if (step == 1)
6485 copies <<= 1;
6486 else
6487 step >>= 1;
6489 if (vspltis_constant (op, step, copies))
6490 return true;
6492 if (vspltis_shifted (op) != 0)
6493 return true;
6495 return false;
6498 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6499 result is OP. Abort if it is not possible. */
6501 rtx
6502 gen_easy_altivec_constant (rtx op)
6504 machine_mode mode = GET_MODE (op);
6505 int nunits = GET_MODE_NUNITS (mode);
6506 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6507 unsigned step = nunits / 4;
6508 unsigned copies = 1;
6510 /* Start with a vspltisw. */
6511 if (vspltis_constant (op, step, copies))
6512 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6514 /* Then try with a vspltish. */
6515 if (step == 1)
6516 copies <<= 1;
6517 else
6518 step >>= 1;
6520 if (vspltis_constant (op, step, copies))
6521 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6523 /* And finally a vspltisb. */
6524 if (step == 1)
6525 copies <<= 1;
6526 else
6527 step >>= 1;
6529 if (vspltis_constant (op, step, copies))
6530 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6532 gcc_unreachable ();
6535 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6536 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6538 Return the number of instructions needed (1 or 2) into the address pointed
6539 via NUM_INSNS_PTR.
6541 Return the constant that is being split via CONSTANT_PTR. */
6543 bool
6544 xxspltib_constant_p (rtx op,
6545 machine_mode mode,
6546 int *num_insns_ptr,
6547 int *constant_ptr)
6549 size_t nunits = GET_MODE_NUNITS (mode);
6550 size_t i;
6551 HOST_WIDE_INT value;
6552 rtx element;
6554 /* Set the returned values to out-of-bounds values.  */
6555 *num_insns_ptr = -1;
6556 *constant_ptr = 256;
6558 if (!TARGET_P9_VECTOR)
6559 return false;
6561 if (mode == VOIDmode)
6562 mode = GET_MODE (op);
6564 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6565 return false;
6567 /* Handle (vec_duplicate <constant>). */
6568 if (GET_CODE (op) == VEC_DUPLICATE)
6570 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6571 && mode != V2DImode)
6572 return false;
6574 element = XEXP (op, 0);
6575 if (!CONST_INT_P (element))
6576 return false;
6578 value = INTVAL (element);
6579 if (!IN_RANGE (value, -128, 127))
6580 return false;
6583 /* Handle (const_vector [...]). */
6584 else if (GET_CODE (op) == CONST_VECTOR)
6586 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6587 && mode != V2DImode)
6588 return false;
6590 element = CONST_VECTOR_ELT (op, 0);
6591 if (!CONST_INT_P (element))
6592 return false;
6594 value = INTVAL (element);
6595 if (!IN_RANGE (value, -128, 127))
6596 return false;
6598 for (i = 1; i < nunits; i++)
6600 element = CONST_VECTOR_ELT (op, i);
6601 if (!CONST_INT_P (element))
6602 return false;
6604 if (value != INTVAL (element))
6605 return false;
6609 /* Handle integer constants being loaded into the upper part of the VSX
6610 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6611 can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLTIB.  */
6612 else if (CONST_INT_P (op))
6614 if (!SCALAR_INT_MODE_P (mode))
6615 return false;
6617 value = INTVAL (op);
6618 if (!IN_RANGE (value, -128, 127))
6619 return false;
6621 if (!IN_RANGE (value, -1, 0))
6623 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6624 return false;
6626 if (EASY_VECTOR_15 (value))
6627 return false;
6631 else
6632 return false;
6634 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6635 sign extend. Special case 0/-1 to allow getting any VSX register instead
6636 of an Altivec register. */
6637 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6638 && EASY_VECTOR_15 (value))
6639 return false;
6641 /* Return # of instructions and the constant byte for XXSPLTIB. */
6642 if (mode == V16QImode)
6643 *num_insns_ptr = 1;
6645 else if (IN_RANGE (value, -1, 0))
6646 *num_insns_ptr = 1;
6648 else
6649 *num_insns_ptr = 2;
6651 *constant_ptr = (int) value;
6652 return true;
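/* Worked example (editorial): a V16QImode vector of all 0x42 costs one
   instruction (xxspltib), while a V4SImode vector of all 0x42 reports
   two -- xxspltib into the byte lanes plus a vextsb2w sign extension --
   since the mode is not V16QImode and 0x42 is outside the 0/-1 special
   case.  */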
6655 const char *
6656 output_vec_const_move (rtx *operands)
6658 int shift;
6659 machine_mode mode;
6660 rtx dest, vec;
6662 dest = operands[0];
6663 vec = operands[1];
6664 mode = GET_MODE (dest);
6666 if (TARGET_VSX)
6668 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6669 int xxspltib_value = 256;
6670 int num_insns = -1;
6672 if (zero_constant (vec, mode))
6674 if (TARGET_P9_VECTOR)
6675 return "xxspltib %x0,0";
6677 else if (dest_vmx_p)
6678 return "vspltisw %0,0";
6680 else
6681 return "xxlxor %x0,%x0,%x0";
6684 if (all_ones_constant (vec, mode))
6686 if (TARGET_P9_VECTOR)
6687 return "xxspltib %x0,255";
6689 else if (dest_vmx_p)
6690 return "vspltisw %0,-1";
6692 else if (TARGET_P8_VECTOR)
6693 return "xxlorc %x0,%x0,%x0";
6695 else
6696 gcc_unreachable ();
6699 if (TARGET_P9_VECTOR
6700 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6702 if (num_insns == 1)
6704 operands[2] = GEN_INT (xxspltib_value & 0xff);
6705 return "xxspltib %x0,%2";
6708 return "#";
6712 if (TARGET_ALTIVEC)
6714 rtx splat_vec;
6716 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6717 if (zero_constant (vec, mode))
6718 return "vspltisw %0,0";
6720 if (all_ones_constant (vec, mode))
6721 return "vspltisw %0,-1";
6723 /* Do we need to construct a value using VSLDOI? */
6724 shift = vspltis_shifted (vec);
6725 if (shift != 0)
6726 return "#";
6728 splat_vec = gen_easy_altivec_constant (vec);
6729 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6730 operands[1] = XEXP (splat_vec, 0);
6731 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6732 return "#";
6734 switch (GET_MODE (splat_vec))
6736 case E_V4SImode:
6737 return "vspltisw %0,%1";
6739 case E_V8HImode:
6740 return "vspltish %0,%1";
6742 case E_V16QImode:
6743 return "vspltisb %0,%1";
6745 default:
6746 gcc_unreachable ();
6750 gcc_unreachable ();
6753 /* Initialize the paired-float vector TARGET to VALS.  */
6755 void
6756 paired_expand_vector_init (rtx target, rtx vals)
6758 machine_mode mode = GET_MODE (target);
6759 int n_elts = GET_MODE_NUNITS (mode);
6760 int n_var = 0;
6761 rtx x, new_rtx, tmp, constant_op, op1, op2;
6762 int i;
6764 for (i = 0; i < n_elts; ++i)
6766 x = XVECEXP (vals, 0, i);
6767 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6768 ++n_var;
6770 if (n_var == 0)
6772 /* Load from constant pool. */
6773 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6774 return;
6777 if (n_var == 2)
6779 /* The vector is initialized only with non-constants. */
6780 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6781 XVECEXP (vals, 0, 1));
6783 emit_move_insn (target, new_rtx);
6784 return;
6787 /* One field is non-constant and the other one is a constant. Load the
6788 constant from the constant pool and use ps_merge instruction to
6789 construct the whole vector. */
6790 op1 = XVECEXP (vals, 0, 0);
6791 op2 = XVECEXP (vals, 0, 1);
6793 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6795 tmp = gen_reg_rtx (GET_MODE (constant_op));
6796 emit_move_insn (tmp, constant_op);
6798 if (CONSTANT_P (op1))
6799 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6800 else
6801 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6803 emit_move_insn (target, new_rtx);
6806 void
6807 paired_expand_vector_move (rtx operands[])
6809 rtx op0 = operands[0], op1 = operands[1];
6811 emit_move_insn (op0, op1);
6814 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6815 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6816 operands for the relation operation COND. This is a recursive
6817 function. */
6819 static void
6820 paired_emit_vector_compare (enum rtx_code rcode,
6821 rtx dest, rtx op0, rtx op1,
6822 rtx cc_op0, rtx cc_op1)
6824 rtx tmp = gen_reg_rtx (V2SFmode);
6825 rtx tmp1, max, min;
6827 gcc_assert (TARGET_PAIRED_FLOAT);
6828 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6830 switch (rcode)
6832 case LT:
6833 case LTU:
6834 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6835 return;
6836 case GE:
6837 case GEU:
6838 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6839 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6840 return;
6841 case LE:
6842 case LEU:
6843 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6844 return;
6845 case GT:
6846 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6847 return;
6848 case EQ:
6849 tmp1 = gen_reg_rtx (V2SFmode);
6850 max = gen_reg_rtx (V2SFmode);
6851 min = gen_reg_rtx (V2SFmode);
6852 gen_reg_rtx (V2SFmode);
6854 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6855 emit_insn (gen_selv2sf4
6856 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6857 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6858 emit_insn (gen_selv2sf4
6859 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6860 emit_insn (gen_subv2sf3 (tmp1, min, max));
6861 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6862 return;
6863 case NE:
6864 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6865 return;
6866 case UNLE:
6867 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6868 return;
6869 case UNLT:
6870 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6871 return;
6872 case UNGE:
6873 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6874 return;
6875 case UNGT:
6876 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6877 return;
6878 default:
6879 gcc_unreachable ();
6882 return;
6885 /* Emit vector conditional expression.
6886 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6887 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6889 int
6890 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6891 rtx cond, rtx cc_op0, rtx cc_op1)
6893 enum rtx_code rcode = GET_CODE (cond);
6895 if (!TARGET_PAIRED_FLOAT)
6896 return 0;
6898 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6900 return 1;
6903 /* Initialize vector TARGET to VALS. */
6905 void
6906 rs6000_expand_vector_init (rtx target, rtx vals)
6908 machine_mode mode = GET_MODE (target);
6909 machine_mode inner_mode = GET_MODE_INNER (mode);
6910 int n_elts = GET_MODE_NUNITS (mode);
6911 int n_var = 0, one_var = -1;
6912 bool all_same = true, all_const_zero = true;
6913 rtx x, mem;
6914 int i;
6916 for (i = 0; i < n_elts; ++i)
6918 x = XVECEXP (vals, 0, i);
6919 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6920 ++n_var, one_var = i;
6921 else if (x != CONST0_RTX (inner_mode))
6922 all_const_zero = false;
6924 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6925 all_same = false;
6928 if (n_var == 0)
6930 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6931 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6932 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6934 /* Zero register. */
6935 emit_move_insn (target, CONST0_RTX (mode));
6936 return;
6938 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6940 /* Splat immediate. */
6941 emit_insn (gen_rtx_SET (target, const_vec));
6942 return;
6944 else
6946 /* Load from constant pool. */
6947 emit_move_insn (target, const_vec);
6948 return;
6952 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6953 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6955 rtx op[2];
6956 size_t i;
6957 size_t num_elements = all_same ? 1 : 2;
6958 for (i = 0; i < num_elements; i++)
6960 op[i] = XVECEXP (vals, 0, i);
6961 /* Just in case there is a SUBREG with a smaller mode, do a
6962 conversion. */
6963 if (GET_MODE (op[i]) != inner_mode)
6965 rtx tmp = gen_reg_rtx (inner_mode);
6966 convert_move (tmp, op[i], 0);
6967 op[i] = tmp;
6969 /* Allow load with splat double word. */
6970 else if (MEM_P (op[i]))
6972 if (!all_same)
6973 op[i] = force_reg (inner_mode, op[i]);
6975 else if (!REG_P (op[i]))
6976 op[i] = force_reg (inner_mode, op[i]);
6979 if (all_same)
6981 if (mode == V2DFmode)
6982 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6983 else
6984 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6986 else
6988 if (mode == V2DFmode)
6989 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6990 else
6991 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6993 return;
6996 /* Special case initializing vector int if we are on 64-bit systems with
6997 direct move or we have the ISA 3.0 instructions. */
6998 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6999 && TARGET_DIRECT_MOVE_64BIT)
7001 if (all_same)
7003 rtx element0 = XVECEXP (vals, 0, 0);
7004 if (MEM_P (element0))
7005 element0 = rs6000_address_for_fpconvert (element0);
7006 else
7007 element0 = force_reg (SImode, element0);
7009 if (TARGET_P9_VECTOR)
7010 emit_insn (gen_vsx_splat_v4si (target, element0));
7011 else
7013 rtx tmp = gen_reg_rtx (DImode);
7014 emit_insn (gen_zero_extendsidi2 (tmp, element0));
7015 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
7017 return;
7019 else
7021 rtx elements[4];
7022 size_t i;
7024 for (i = 0; i < 4; i++)
7026 elements[i] = XVECEXP (vals, 0, i);
7027 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
7028 elements[i] = copy_to_mode_reg (SImode, elements[i]);
7031 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
7032 elements[2], elements[3]));
7033 return;
7037 /* With single-precision floating point on VSX, we know that internally
7038 single precision is actually represented as a double.  Either make two
7039 V2DF vectors and convert those vectors to single precision, or do one
7040 conversion and splat the result to the other elements.  */
7041 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
7043 if (all_same)
7045 rtx element0 = XVECEXP (vals, 0, 0);
7047 if (TARGET_P9_VECTOR)
7049 if (MEM_P (element0))
7050 element0 = rs6000_address_for_fpconvert (element0);
7052 emit_insn (gen_vsx_splat_v4sf (target, element0));
7055 else
7057 rtx freg = gen_reg_rtx (V4SFmode);
7058 rtx sreg = force_reg (SFmode, element0);
7059 rtx cvt = (TARGET_XSCVDPSPN
7060 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
7061 : gen_vsx_xscvdpsp_scalar (freg, sreg));
7063 emit_insn (cvt);
7064 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
7065 const0_rtx));
7068 else
7070 rtx dbl_even = gen_reg_rtx (V2DFmode);
7071 rtx dbl_odd = gen_reg_rtx (V2DFmode);
7072 rtx flt_even = gen_reg_rtx (V4SFmode);
7073 rtx flt_odd = gen_reg_rtx (V4SFmode);
7074 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
7075 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
7076 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
7077 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
7079 /* Use VMRGEW if we can instead of doing a permute. */
7080 if (TARGET_P8_VECTOR)
7082 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
7083 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
7084 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7085 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7086 if (BYTES_BIG_ENDIAN)
7087 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
7088 else
7089 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
7091 else
7093 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
7094 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
7095 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7096 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7097 rs6000_expand_extract_even (target, flt_even, flt_odd);
7100 return;
7103 /* Special case initializing vector short/char that are splats if we are on
7104 64-bit systems with direct move. */
7105 if (all_same && TARGET_DIRECT_MOVE_64BIT
7106 && (mode == V16QImode || mode == V8HImode))
7108 rtx op0 = XVECEXP (vals, 0, 0);
7109 rtx di_tmp = gen_reg_rtx (DImode);
7111 if (!REG_P (op0))
7112 op0 = force_reg (GET_MODE_INNER (mode), op0);
7114 if (mode == V16QImode)
7116 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
7117 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
7118 return;
7121 if (mode == V8HImode)
7123 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
7124 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
7125 return;
7129 /* Store value to stack temp. Load vector element. Splat. However, splat
7130 of 64-bit items is not supported on Altivec. */
7131 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
7133 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7134 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
7135 XVECEXP (vals, 0, 0));
7136 x = gen_rtx_UNSPEC (VOIDmode,
7137 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7138 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7139 gen_rtvec (2,
7140 gen_rtx_SET (target, mem),
7141 x)));
7142 x = gen_rtx_VEC_SELECT (inner_mode, target,
7143 gen_rtx_PARALLEL (VOIDmode,
7144 gen_rtvec (1, const0_rtx)));
7145 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
7146 return;
7149 /* One field is non-constant. Load constant then overwrite
7150 varying field. */
7151 if (n_var == 1)
7153 rtx copy = copy_rtx (vals);
7155 /* Load constant part of vector, substitute neighboring value for
7156 varying element. */
7157 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
7158 rs6000_expand_vector_init (target, copy);
7160 /* Insert variable. */
7161 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
7162 return;
7165 /* Construct the vector in memory one field at a time
7166 and load the whole vector. */
7167 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7168 for (i = 0; i < n_elts; i++)
7169 emit_move_insn (adjust_address_nv (mem, inner_mode,
7170 i * GET_MODE_SIZE (inner_mode)),
7171 XVECEXP (vals, 0, i));
7172 emit_move_insn (target, mem);
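/* Illustrative examples (not part of the original source): user-level
   initializers that reach the cases above, assuming GCC's AltiVec
   vector extension on a VSX-capable target.

     #include <altivec.h>

     vector int                       -- all_same splat path
     splat4 (int x)
     {
       return (vector int) { x, x, x, x };
     }

     vector int                       -- n_var == 1 path: load the
     one_var (int x)                  -- constant vector, then insert
     {                                -- the single variable element
       return (vector int) { 1, 2, x, 4 };
     }

   A fully variable initializer with no special-case support falls
   through to the stack temporary built one element at a time.  */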
7175 /* Set field ELT of TARGET to VAL. */
7177 void
7178 rs6000_expand_vector_set (rtx target, rtx val, int elt)
7180 machine_mode mode = GET_MODE (target);
7181 machine_mode inner_mode = GET_MODE_INNER (mode);
7182 rtx reg = gen_reg_rtx (mode);
7183 rtx mask, mem, x;
7184 int width = GET_MODE_SIZE (inner_mode);
7185 int i;
7187 val = force_reg (GET_MODE (val), val);
7189 if (VECTOR_MEM_VSX_P (mode))
7191 rtx insn = NULL_RTX;
7192 rtx elt_rtx = GEN_INT (elt);
7194 if (mode == V2DFmode)
7195 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7197 else if (mode == V2DImode)
7198 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7200 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
7202 if (mode == V4SImode)
7203 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7204 else if (mode == V8HImode)
7205 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7206 else if (mode == V16QImode)
7207 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7208 else if (mode == V4SFmode)
7209 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
7212 if (insn)
7214 emit_insn (insn);
7215 return;
7219 /* Simplify setting single element vectors like V1TImode. */
7220 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7222 emit_move_insn (target, gen_lowpart (mode, val));
7223 return;
7226 /* Load single variable value. */
7227 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7228 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7229 x = gen_rtx_UNSPEC (VOIDmode,
7230 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7231 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7232 gen_rtvec (2,
7233 gen_rtx_SET (reg, mem),
7234 x)));
7236 /* Linear sequence. */
7237 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7238 for (i = 0; i < 16; ++i)
7239 XVECEXP (mask, 0, i) = GEN_INT (i);
7241 /* Set permute mask to insert element into target. */
7242 for (i = 0; i < width; ++i)
7243 XVECEXP (mask, 0, elt*width + i)
7244 = GEN_INT (i + 0x10);
7245 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
7247 if (BYTES_BIG_ENDIAN)
7248 x = gen_rtx_UNSPEC (mode,
7249 gen_rtvec (3, target, reg,
7250 force_reg (V16QImode, x)),
7251 UNSPEC_VPERM);
7252 else
7254 if (TARGET_P9_VECTOR)
7255 x = gen_rtx_UNSPEC (mode,
7256 gen_rtvec (3, target, reg,
7257 force_reg (V16QImode, x)),
7258 UNSPEC_VPERMR);
7259 else
7261 /* Invert selector. We prefer to generate VNAND on P8 so
7262 that future fusion opportunities can kick in, but must
7263 generate VNOR elsewhere. */
7264 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7265 rtx iorx = (TARGET_P8_VECTOR
7266 ? gen_rtx_IOR (V16QImode, notx, notx)
7267 : gen_rtx_AND (V16QImode, notx, notx));
7268 rtx tmp = gen_reg_rtx (V16QImode);
7269 emit_insn (gen_rtx_SET (tmp, iorx));
7271 /* Permute with operands reversed and adjusted selector. */
7272 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7273 UNSPEC_VPERM);
7277 emit_insn (gen_rtx_SET (target, x));
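/* Worked example (illustrative, not from the original source): when
   inserting into lane 1 of a V4SImode vector, width = 4 and the
   selector built above is

     { 0, 1, 2, 3, 0x10, 0x11, 0x12, 0x13, 8, 9, 10, 11, 12, 13, 14, 15 }

   In the big-endian VPERM, selector bytes 0x00-0x0f pick bytes of
   TARGET and 0x10-0x1f pick bytes of REG (which holds the new value
   loaded by the LVE-style insn), so only bytes 4-7 of the result are
   replaced.  */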
7280 /* Extract field ELT from VEC into TARGET. */
7282 void
7283 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7285 machine_mode mode = GET_MODE (vec);
7286 machine_mode inner_mode = GET_MODE_INNER (mode);
7287 rtx mem;
7289 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7291 switch (mode)
7293 default:
7294 break;
7295 case E_V1TImode:
7296 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7297 emit_move_insn (target, gen_lowpart (TImode, vec));
7298 break;
7299 case E_V2DFmode:
7300 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7301 return;
7302 case E_V2DImode:
7303 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7304 return;
7305 case E_V4SFmode:
7306 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7307 return;
7308 case E_V16QImode:
7309 if (TARGET_DIRECT_MOVE_64BIT)
7311 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7312 return;
7314 else
7315 break;
7316 case E_V8HImode:
7317 if (TARGET_DIRECT_MOVE_64BIT)
7319 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7320 return;
7322 else
7323 break;
7324 case E_V4SImode:
7325 if (TARGET_DIRECT_MOVE_64BIT)
7327 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7328 return;
7330 break;
7333 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7334 && TARGET_DIRECT_MOVE_64BIT)
7336 if (GET_MODE (elt) != DImode)
7338 rtx tmp = gen_reg_rtx (DImode);
7339 convert_move (tmp, elt, 0);
7340 elt = tmp;
7342 else if (!REG_P (elt))
7343 elt = force_reg (DImode, elt);
7345 switch (mode)
7347 case E_V2DFmode:
7348 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7349 return;
7351 case E_V2DImode:
7352 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7353 return;
7355 case E_V4SFmode:
7356 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7357 return;
7359 case E_V4SImode:
7360 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7361 return;
7363 case E_V8HImode:
7364 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7365 return;
7367 case E_V16QImode:
7368 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7369 return;
7371 default:
7372 gcc_unreachable ();
7376 gcc_assert (CONST_INT_P (elt));
7378 /* Allocate mode-sized buffer. */
7379 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7381 emit_move_insn (mem, vec);
7383 /* Add offset to field within buffer matching vector element. */
7384 mem = adjust_address_nv (mem, inner_mode,
7385 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7387 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
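/* Illustrative examples (not part of the original source): extracts
   that exercise the paths above, using GCC's generic vector syntax.

     typedef int v4si __attribute__ ((vector_size (16)));

     int
     get2 (v4si v)
     {
       return v[2];        -- constant element: vsx_extract_v4si path
     }

     int
     getn (v4si v, long n)
     {
       return v[n];        -- variable element: the *_var patterns,
     }                     -- which need TARGET_DIRECT_MOVE_64BIT

   Without those patterns the expander must present a MEM, since the
   fallback below asserts CONST_INT_P (elt).  */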
7390 /* Helper function to return the register number of a RTX. */
7391 static inline int
7392 regno_or_subregno (rtx op)
7394 if (REG_P (op))
7395 return REGNO (op);
7396 else if (SUBREG_P (op))
7397 return subreg_regno (op);
7398 else
7399 gcc_unreachable ();
7402 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7403 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7404 temporary (BASE_TMP) to fixup the address. Return the new memory address
7405 that is valid for reads or writes to a given register (SCALAR_REG). */
7407 static rtx
7408 rs6000_adjust_vec_address (rtx scalar_reg,
7409 rtx mem,
7410 rtx element,
7411 rtx base_tmp,
7412 machine_mode scalar_mode)
7414 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7415 rtx addr = XEXP (mem, 0);
7416 rtx element_offset;
7417 rtx new_addr;
7418 bool valid_addr_p;
7420 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7421 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7423 /* Calculate what we need to add to the address to get the element
7424 address. */
7425 if (CONST_INT_P (element))
7426 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7427 else
7429 int byte_shift = exact_log2 (scalar_size);
7430 gcc_assert (byte_shift >= 0);
7432 if (byte_shift == 0)
7433 element_offset = element;
7435 else
7437 if (TARGET_POWERPC64)
7438 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7439 else
7440 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7442 element_offset = base_tmp;
7446 /* Create the new address pointing to the element within the vector. If we
7447 are adding 0, we don't have to change the address. */
7448 if (element_offset == const0_rtx)
7449 new_addr = addr;
7451 /* A simple indirect address can be converted into a reg + offset
7452 address. */
7453 else if (REG_P (addr) || SUBREG_P (addr))
7454 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7456 /* Optimize D-FORM addresses with a constant offset and a constant element
7457 number, folding the element offset directly into the address.  */
7458 else if (GET_CODE (addr) == PLUS)
7460 rtx op0 = XEXP (addr, 0);
7461 rtx op1 = XEXP (addr, 1);
7462 rtx insn;
7464 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7465 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7467 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7468 rtx offset_rtx = GEN_INT (offset);
7470 if (IN_RANGE (offset, -32768, 32767)
7471 && (scalar_size < 8 || (offset & 0x3) == 0))
7472 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7473 else
7475 emit_move_insn (base_tmp, offset_rtx);
7476 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7479 else
7481 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7482 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7484 /* Note, ADDI requires the register being added to be a base
7485 register. If the register was R0, load it up into the temporary
7486 and do the add. */
7487 if (op1_reg_p
7488 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7490 insn = gen_add3_insn (base_tmp, op1, element_offset);
7491 gcc_assert (insn != NULL_RTX);
7492 emit_insn (insn);
7495 else if (ele_reg_p
7496 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7498 insn = gen_add3_insn (base_tmp, element_offset, op1);
7499 gcc_assert (insn != NULL_RTX);
7500 emit_insn (insn);
7503 else
7505 emit_move_insn (base_tmp, op1);
7506 emit_insn (gen_add2_insn (base_tmp, element_offset));
7509 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7513 else
7515 emit_move_insn (base_tmp, addr);
7516 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7519 /* If we have a PLUS, we need to see whether the particular register class
7520 allows for D-FORM or X-FORM addressing. */
7521 if (GET_CODE (new_addr) == PLUS)
7523 rtx op1 = XEXP (new_addr, 1);
7524 addr_mask_type addr_mask;
7525 int scalar_regno = regno_or_subregno (scalar_reg);
7527 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7528 if (INT_REGNO_P (scalar_regno))
7529 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7531 else if (FP_REGNO_P (scalar_regno))
7532 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7534 else if (ALTIVEC_REGNO_P (scalar_regno))
7535 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7537 else
7538 gcc_unreachable ();
7540 if (REG_P (op1) || SUBREG_P (op1))
7541 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7542 else
7543 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7546 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7547 valid_addr_p = true;
7549 else
7550 valid_addr_p = false;
7552 if (!valid_addr_p)
7554 emit_move_insn (base_tmp, new_addr);
7555 new_addr = base_tmp;
7558 return change_address (mem, scalar_mode, new_addr);
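/* Worked example (illustrative, not from the original source):
   extracting element 3 of a V4SImode vector whose address is
   (plus r9 48): scalar_size = 4, so element_offset = GEN_INT (12),
   and the constant-offset fold above gives 48 + 12 = 60.  That fits
   the signed 16-bit displacement test, and since scalar_size < 8 the
   offset needs no 4-byte alignment check, so new_addr is simply
   (plus r9 60), assuming the register class of SCALAR_REG allows
   offset (RELOAD_REG_OFFSET) addressing.  */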
7561 /* Split a variable vec_extract operation into the component instructions. */
7563 void
7564 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7565 rtx tmp_altivec)
7567 machine_mode mode = GET_MODE (src);
7568 machine_mode scalar_mode = GET_MODE (dest);
7569 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7570 int byte_shift = exact_log2 (scalar_size);
7572 gcc_assert (byte_shift >= 0);
7574 /* If we are given a memory address, optimize to load just the element. We
7575 don't have to adjust the vector element number on little endian
7576 systems. */
7577 if (MEM_P (src))
7579 gcc_assert (REG_P (tmp_gpr));
7580 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7581 tmp_gpr, scalar_mode));
7582 return;
7585 else if (REG_P (src) || SUBREG_P (src))
7587 int bit_shift = byte_shift + 3;
7588 rtx element2;
7589 int dest_regno = regno_or_subregno (dest);
7590 int src_regno = regno_or_subregno (src);
7591 int element_regno = regno_or_subregno (element);
7593 gcc_assert (REG_P (tmp_gpr));
7595 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7596 a general purpose register. */
7597 if (TARGET_P9_VECTOR
7598 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7599 && INT_REGNO_P (dest_regno)
7600 && ALTIVEC_REGNO_P (src_regno)
7601 && INT_REGNO_P (element_regno))
7603 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7604 rtx element_si = gen_rtx_REG (SImode, element_regno);
7606 if (mode == V16QImode)
7607 emit_insn (VECTOR_ELT_ORDER_BIG
7608 ? gen_vextublx (dest_si, element_si, src)
7609 : gen_vextubrx (dest_si, element_si, src));
7611 else if (mode == V8HImode)
7613 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7614 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7615 emit_insn (VECTOR_ELT_ORDER_BIG
7616 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7617 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7621 else
7623 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7624 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7625 emit_insn (VECTOR_ELT_ORDER_BIG
7626 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7627 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7630 return;
7634 gcc_assert (REG_P (tmp_altivec));
7636 /* For little endian, adjust element ordering.  For V2DI/V2DF, we can use
7637 an XOR; otherwise we need to subtract.  The shift amount is chosen so
7638 that VSLO will shift the element into the upper position (adding 3
7639 converts a byte shift into a bit shift).  */
7640 if (scalar_size == 8)
7642 if (!VECTOR_ELT_ORDER_BIG)
7644 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7645 element2 = tmp_gpr;
7647 else
7648 element2 = element;
7650 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7651 bit. */
7652 emit_insn (gen_rtx_SET (tmp_gpr,
7653 gen_rtx_AND (DImode,
7654 gen_rtx_ASHIFT (DImode,
7655 element2,
7656 GEN_INT (6)),
7657 GEN_INT (64))));
7659 else
7661 if (!VECTOR_ELT_ORDER_BIG)
7663 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7665 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7666 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7667 element2 = tmp_gpr;
7669 else
7670 element2 = element;
7672 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7675 /* Get the value into the lower byte of the AltiVec register where VSLO
7676 expects it.  */
7677 if (TARGET_P9_VECTOR)
7678 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7679 else if (can_create_pseudo_p ())
7680 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7681 else
7683 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7684 emit_move_insn (tmp_di, tmp_gpr);
7685 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7688 /* Do the VSLO to get the value into the final location. */
7689 switch (mode)
7691 case E_V2DFmode:
7692 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7693 return;
7695 case E_V2DImode:
7696 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7697 return;
7699 case E_V4SFmode:
7701 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7702 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7703 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7704 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7705 tmp_altivec));
7707 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7708 return;
7711 case E_V4SImode:
7712 case E_V8HImode:
7713 case E_V16QImode:
7715 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7716 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7717 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7718 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7719 tmp_altivec));
7720 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7721 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7722 GEN_INT (64 - (8 * scalar_size))));
7723 return;
7726 default:
7727 gcc_unreachable ();
7730 return;
7732 else
7733 gcc_unreachable ();
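/* Worked example (illustrative, not from the original source): a
   variable extract of byte I from a V16QImode register on little
   endian takes the scalar_size != 8 branch above with byte_shift = 0
   and bit_shift = 3:

     tmp = I & 15;          -- gen_anddi3
     tmp = 15 - tmp;        -- gen_subdi3, the LE index flip
     tmp = tmp << 3;        -- gen_ashldi3, byte count scaled to bits

   VSLO then shifts the selected byte into the high position, and the
   final gen_ashrdi3 by 64 - 8 = 56 bits moves it down to the low
   byte of the GPR result.  */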
7736 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7737 two SImode values. */
7739 static void
7740 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
7742 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
7744 if (CONST_INT_P (si1) && CONST_INT_P (si2))
7746 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
7747 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
7749 emit_move_insn (dest, GEN_INT (const1 | const2));
7750 return;
7753 /* Put si1 into the upper 32 bits of dest.  */
7754 if (CONST_INT_P (si1))
7755 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
7756 else
7758 /* Generate RLDIC. */
7759 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
7760 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
7761 rtx mask_rtx = GEN_INT (mask_32bit << 32);
7762 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
7763 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
7764 emit_insn (gen_rtx_SET (dest, and_rtx));
7767 /* Put si2 into the temporary. */
7768 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
7769 if (CONST_INT_P (si2))
7770 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
7771 else
7772 emit_insn (gen_zero_extendsidi2 (tmp, si2));
7774 /* Combine the two parts. */
7775 emit_insn (gen_iordi3 (dest, dest, tmp));
7776 return;
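/* Worked example (illustrative, not from the original source): with
   si1 = 1 and si2 = 2 both constant, the fast path above emits a
   single move of

     ((1 & 0xffffffff) << 32) | (2 & 0xffffffff) = 0x0000000100000002

   For a non-constant si1 the same value is assembled with the
   shift-and-mask (RLDIC) above, a zero extend of si2, and an OR.  */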
7779 /* Split a V4SI initialization. */
7781 void
7782 rs6000_split_v4si_init (rtx operands[])
7784 rtx dest = operands[0];
7786 /* Destination is a GPR, build up the two DImode parts in place. */
7787 if (REG_P (dest) || SUBREG_P (dest))
7789 int d_regno = regno_or_subregno (dest);
7790 rtx scalar1 = operands[1];
7791 rtx scalar2 = operands[2];
7792 rtx scalar3 = operands[3];
7793 rtx scalar4 = operands[4];
7794 rtx tmp1 = operands[5];
7795 rtx tmp2 = operands[6];
7797 /* Even though we only need one temporary (plus the destination, which
7798 has an early clobber constraint), try to use two temporaries, one for
7799 each double word created.  That way the 2nd insn scheduling pass can
7800 rearrange things so the two parts are done in parallel.  */
7801 if (BYTES_BIG_ENDIAN)
7803 rtx di_lo = gen_rtx_REG (DImode, d_regno);
7804 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
7805 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
7806 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
7808 else
7810 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
7811 rtx di_hi = gen_rtx_REG (DImode, d_regno);
7812 gcc_assert (!VECTOR_ELT_ORDER_BIG);
7813 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
7814 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
7816 return;
7819 else
7820 gcc_unreachable ();
7823 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7824 selects whether the alignment is abi mandated, optional, or
7825 both abi and optional alignment. */
7827 unsigned int
7828 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7830 if (how != align_opt)
7832 if (TREE_CODE (type) == VECTOR_TYPE)
7834 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type)))
7836 if (align < 64)
7837 align = 64;
7839 else if (align < 128)
7840 align = 128;
7844 if (how != align_abi)
7846 if (TREE_CODE (type) == ARRAY_TYPE
7847 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7849 if (align < BITS_PER_WORD)
7850 align = BITS_PER_WORD;
7854 return align;
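/* Summary (illustrative, not from the original source): for ABI
   alignment, paired vector modes (V2SF/V2SI under
   TARGET_PAIRED_FLOAT) are raised to at least 64 bits and all other
   vector types to at least 128 bits; as an optional, non-ABI
   improvement, arrays of char are raised to BITS_PER_WORD, e.g.
   char buf[32] becomes 8-byte aligned on a 64-bit target.  */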
7857 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7859 bool
7860 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7862 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7864 if (computed != 128)
7866 static bool warned;
7867 if (!warned && warn_psabi)
7869 warned = true;
7870 inform (input_location,
7871 "the layout of aggregates containing vectors with"
7872 " %d-byte alignment has changed in GCC 5",
7873 computed / BITS_PER_UNIT);
7876 /* In current GCC there is no special case. */
7877 return false;
7880 return false;
7883 /* AIX increases natural record alignment to doubleword if the first
7884 field is an FP double while the FP fields remain word aligned. */
7886 unsigned int
7887 rs6000_special_round_type_align (tree type, unsigned int computed,
7888 unsigned int specified)
7890 unsigned int align = MAX (computed, specified);
7891 tree field = TYPE_FIELDS (type);
7893 /* Skip all non-field decls.  */
7894 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7895 field = DECL_CHAIN (field);
7897 if (field != NULL && field != type)
7899 type = TREE_TYPE (field);
7900 while (TREE_CODE (type) == ARRAY_TYPE)
7901 type = TREE_TYPE (type);
7903 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7904 align = MAX (align, 64);
7907 return align;
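/* Worked example (illustrative, not from the original source): on
   AIX,

     struct { double d; int i; }

   is raised to doubleword alignment because its first field has
   DFmode, while

     struct { int i; double d; }

   is not -- only the first field is examined, with arrays of double
   counting through the ARRAY_TYPE walk above.  */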
7910 /* Darwin increases record alignment to the natural alignment of
7911 the first field. */
7913 unsigned int
7914 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7915 unsigned int specified)
7917 unsigned int align = MAX (computed, specified);
7919 if (TYPE_PACKED (type))
7920 return align;
7922 /* Find the first field, looking down into aggregates. */
7923 do {
7924 tree field = TYPE_FIELDS (type);
7925 /* Skip all non-field decls.  */
7926 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7927 field = DECL_CHAIN (field);
7928 if (! field)
7929 break;
7930 /* A packed field does not contribute any extra alignment. */
7931 if (DECL_PACKED (field))
7932 return align;
7933 type = TREE_TYPE (field);
7934 while (TREE_CODE (type) == ARRAY_TYPE)
7935 type = TREE_TYPE (type);
7936 } while (AGGREGATE_TYPE_P (type));
7938 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7939 align = MAX (align, TYPE_ALIGN (type));
7941 return align;
7944 /* Return 1 for an operand in small memory on V.4/eabi. */
7946 int
7947 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7948 machine_mode mode ATTRIBUTE_UNUSED)
7950 #if TARGET_ELF
7951 rtx sym_ref;
7953 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7954 return 0;
7956 if (DEFAULT_ABI != ABI_V4)
7957 return 0;
7959 if (GET_CODE (op) == SYMBOL_REF)
7960 sym_ref = op;
7962 else if (GET_CODE (op) != CONST
7963 || GET_CODE (XEXP (op, 0)) != PLUS
7964 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
7965 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
7966 return 0;
7968 else
7970 rtx sum = XEXP (op, 0);
7971 HOST_WIDE_INT summand;
7973 /* We have to be careful here, because it is the referenced address
7974 that must be 32k from _SDA_BASE_, not just the symbol. */
7975 summand = INTVAL (XEXP (sum, 1));
7976 if (summand < 0 || summand > g_switch_value)
7977 return 0;
7979 sym_ref = XEXP (sum, 0);
7982 return SYMBOL_REF_SMALL_P (sym_ref);
7983 #else
7984 return 0;
7985 #endif
7988 /* Return true if either operand is a general purpose register. */
7990 bool
7991 gpr_or_gpr_p (rtx op0, rtx op1)
7993 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7994 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7997 /* Return true if this is a direct move operation between GPR registers and
7998 floating point/VSX registers.  */
8000 bool
8001 direct_move_p (rtx op0, rtx op1)
8003 int regno0, regno1;
8005 if (!REG_P (op0) || !REG_P (op1))
8006 return false;
8008 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
8009 return false;
8011 regno0 = REGNO (op0);
8012 regno1 = REGNO (op1);
8013 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
8014 return false;
8016 if (INT_REGNO_P (regno0))
8017 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
8019 else if (INT_REGNO_P (regno1))
8021 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
8022 return true;
8024 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
8025 return true;
8028 return false;
8031 /* Return true if the OFFSET is valid for the quad address instructions that
8032 use d-form (register + offset) addressing. */
8034 static inline bool
8035 quad_address_offset_p (HOST_WIDE_INT offset)
8037 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
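/* Worked examples (illustrative, not from the original source):
   quad_address_offset_p accepts -32768, 0 and 32752 (all 16-byte
   multiples within the signed 16-bit range) but rejects 8
   (misaligned) and 32768 (out of range).  */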
8040 /* Return true if ADDR is an acceptable address for a quad memory
8041 operation of mode MODE (either LQ/STQ for general purpose registers, or
8042 LXV/STXV for vector registers under ISA 3.0).  STRICT is true if the
8043 address must be strictly legitimate, i.e. if the base register must
8044 pass the strict form of INT_REG_OK_FOR_BASE_P.  */
8046 bool
8047 quad_address_p (rtx addr, machine_mode mode, bool strict)
8049 rtx op0, op1;
8051 if (GET_MODE_SIZE (mode) != 16)
8052 return false;
8054 if (legitimate_indirect_address_p (addr, strict))
8055 return true;
8057 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8058 return false;
8060 if (GET_CODE (addr) != PLUS)
8061 return false;
8063 op0 = XEXP (addr, 0);
8064 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8065 return false;
8067 op1 = XEXP (addr, 1);
8068 if (!CONST_INT_P (op1))
8069 return false;
8071 return quad_address_offset_p (INTVAL (op1));
8074 /* Return true if this is a load or store quad operation. This function does
8075 not handle the atomic quad memory instructions. */
8077 bool
8078 quad_load_store_p (rtx op0, rtx op1)
8080 bool ret;
8082 if (!TARGET_QUAD_MEMORY)
8083 ret = false;
8085 else if (REG_P (op0) && MEM_P (op1))
8086 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
8087 && quad_memory_operand (op1, GET_MODE (op1))
8088 && !reg_overlap_mentioned_p (op0, op1));
8090 else if (MEM_P (op0) && REG_P (op1))
8091 ret = (quad_memory_operand (op0, GET_MODE (op0))
8092 && quad_int_reg_operand (op1, GET_MODE (op1)));
8094 else
8095 ret = false;
8097 if (TARGET_DEBUG_ADDR)
8099 fprintf (stderr, "\n========== quad_load_store, return %s\n",
8100 ret ? "true" : "false");
8101 debug_rtx (gen_rtx_SET (op0, op1));
8104 return ret;
8107 /* Given an address, return a constant offset term if one exists. */
8109 static rtx
8110 address_offset (rtx op)
8112 if (GET_CODE (op) == PRE_INC
8113 || GET_CODE (op) == PRE_DEC)
8114 op = XEXP (op, 0);
8115 else if (GET_CODE (op) == PRE_MODIFY
8116 || GET_CODE (op) == LO_SUM)
8117 op = XEXP (op, 1);
8119 if (GET_CODE (op) == CONST)
8120 op = XEXP (op, 0);
8122 if (GET_CODE (op) == PLUS)
8123 op = XEXP (op, 1);
8125 if (CONST_INT_P (op))
8126 return op;
8128 return NULL_RTX;
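/* Worked examples (illustrative, not from the original source) of
   what address_offset returns:

     (plus r3 (const_int 16))                     -> (const_int 16)
     (lo_sum r3 (const (plus (symbol_ref x) 8)))  -> (const_int 8)
     (reg r3)                                     -> NULL_RTX
   */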
8131 /* Return true if the MEM operand is a memory operand suitable for use
8132 with a (full width, possibly multiple) gpr load/store. On
8133 powerpc64 this means the offset must be divisible by 4.
8134 Implements 'Y' constraint.
8136 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8137 a constraint function we know the operand has satisfied a suitable
8138 memory predicate. Also accept some odd rtl generated by reload
8139 (see rs6000_legitimize_reload_address for various forms). It is
8140 important that reload rtl be accepted by appropriate constraints
8141 but not by the operand predicate.
8143 Offsetting a lo_sum should not be allowed, except where we know by
8144 alignment that a 32k boundary is not crossed, but see the ???
8145 comment in rs6000_legitimize_reload_address. Note that by
8146 "offsetting" here we mean a further offset to access parts of the
8147 MEM. It's fine to have a lo_sum where the inner address is offset
8148 from a sym, since the same sym+offset will appear in the high part
8149 of the address calculation. */
8151 bool
8152 mem_operand_gpr (rtx op, machine_mode mode)
8154 unsigned HOST_WIDE_INT offset;
8155 int extra;
8156 rtx addr = XEXP (op, 0);
8158 op = address_offset (addr);
8159 if (op == NULL_RTX)
8160 return true;
8162 offset = INTVAL (op);
8163 if (TARGET_POWERPC64 && (offset & 3) != 0)
8164 return false;
8166 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8167 if (extra < 0)
8168 extra = 0;
8170 if (GET_CODE (addr) == LO_SUM)
8171 /* For lo_sum addresses, we must allow any offset except one that
8172 causes a wrap, so test only the low 16 bits. */
8173 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8175 return offset + 0x8000 < 0x10000u - extra;
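/* Worked example (illustrative, not from the original source): for a
   TImode access on powerpc64, extra = 16 - 8 = 8 and the test above
   requires

     offset + 0x8000 < 0x10000 - 8,  i.e.  offset <= 32759

   so that the second doubleword at offset + 8 still has a valid
   16-bit displacement; combined with the multiple-of-4 check, the
   largest usable offset is 32756.  */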
8178 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8179 enforce an offset divisible by 4 even for 32-bit. */
8181 bool
8182 mem_operand_ds_form (rtx op, machine_mode mode)
8184 unsigned HOST_WIDE_INT offset;
8185 int extra;
8186 rtx addr = XEXP (op, 0);
8188 if (!offsettable_address_p (false, mode, addr))
8189 return false;
8191 op = address_offset (addr);
8192 if (op == NULL_RTX)
8193 return true;
8195 offset = INTVAL (op);
8196 if ((offset & 3) != 0)
8197 return false;
8199 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8200 if (extra < 0)
8201 extra = 0;
8203 if (GET_CODE (addr) == LO_SUM)
8204 /* For lo_sum addresses, we must allow any offset except one that
8205 causes a wrap, so test only the low 16 bits. */
8206 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8208 return offset + 0x8000 < 0x10000u - extra;
8211 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8213 static bool
8214 reg_offset_addressing_ok_p (machine_mode mode)
8216 switch (mode)
8218 case E_V16QImode:
8219 case E_V8HImode:
8220 case E_V4SFmode:
8221 case E_V4SImode:
8222 case E_V2DFmode:
8223 case E_V2DImode:
8224 case E_V1TImode:
8225 case E_TImode:
8226 case E_TFmode:
8227 case E_KFmode:
8228 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8229 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8230 a vector mode, if we want to use the VSX registers to move it around,
8231 we need to restrict ourselves to reg+reg addressing. Similarly for
8232 IEEE 128-bit floating point that is passed in a single vector
8233 register. */
8234 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8235 return mode_supports_vsx_dform_quad (mode);
8236 break;
8238 case E_V2SImode:
8239 case E_V2SFmode:
8240 /* Paired vector modes. Only reg+reg addressing is valid. */
8241 if (TARGET_PAIRED_FLOAT)
8242 return false;
8243 break;
8245 case E_SDmode:
8246 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8247 addressing for the LFIWZX and STFIWX instructions. */
8248 if (TARGET_NO_SDMODE_STACK)
8249 return false;
8250 break;
8252 default:
8253 break;
8256 return true;
8259 static bool
8260 virtual_stack_registers_memory_p (rtx op)
8262 int regnum;
8264 if (GET_CODE (op) == REG)
8265 regnum = REGNO (op);
8267 else if (GET_CODE (op) == PLUS
8268 && GET_CODE (XEXP (op, 0)) == REG
8269 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8270 regnum = REGNO (XEXP (op, 0));
8272 else
8273 return false;
8275 return (regnum >= FIRST_VIRTUAL_REGISTER
8276 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8279 /* Return true if a MODE-sized memory access to OP plus OFFSET
8280 is known to not straddle a 32k boundary. This function is used
8281 to determine whether -mcmodel=medium code can use TOC pointer
8282 relative addressing for OP. This means the alignment of the TOC
8283 pointer must also be taken into account, and unfortunately that is
8284 only 8 bytes. */
8286 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8287 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8288 #endif
8290 static bool
8291 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8292 machine_mode mode)
8294 tree decl;
8295 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8297 if (GET_CODE (op) != SYMBOL_REF)
8298 return false;
8300 /* ISA 3.0 vector d-form addressing is restricted, don't allow
8301 SYMBOL_REF. */
8302 if (mode_supports_vsx_dform_quad (mode))
8303 return false;
8305 dsize = GET_MODE_SIZE (mode);
8306 decl = SYMBOL_REF_DECL (op);
8307 if (!decl)
8309 if (dsize == 0)
8310 return false;
8312 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8313 replacing memory addresses with an anchor plus offset. We
8314 could find the decl by rummaging around in the block->objects
8315 VEC for the given offset but that seems like too much work. */
8316 dalign = BITS_PER_UNIT;
8317 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8318 && SYMBOL_REF_ANCHOR_P (op)
8319 && SYMBOL_REF_BLOCK (op) != NULL)
8321 struct object_block *block = SYMBOL_REF_BLOCK (op);
8323 dalign = block->alignment;
8324 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8326 else if (CONSTANT_POOL_ADDRESS_P (op))
8328 /* It would be nice to have get_pool_align().. */
8329 machine_mode cmode = get_pool_mode (op);
8331 dalign = GET_MODE_ALIGNMENT (cmode);
8334 else if (DECL_P (decl))
8336 dalign = DECL_ALIGN (decl);
8338 if (dsize == 0)
8340 /* Allow BLKmode when the entire object is known to not
8341 cross a 32k boundary. */
8342 if (!DECL_SIZE_UNIT (decl))
8343 return false;
8345 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8346 return false;
8348 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8349 if (dsize > 32768)
8350 return false;
8352 dalign /= BITS_PER_UNIT;
8353 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8354 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8355 return dalign >= dsize;
8358 else
8359 gcc_unreachable ();
8361 /* Find how many bits of the alignment we know for this access. */
8362 dalign /= BITS_PER_UNIT;
8363 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8364 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8365 mask = dalign - 1;
8366 lsb = offset & -offset;
8367 mask &= lsb - 1;
8368 dalign = mask + 1;
8370 return dalign >= dsize;
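/* Worked example (illustrative, not from the original source): for a
   known alignment of 16 bytes, capped to
   POWERPC64_TOC_POINTER_ALIGNMENT = 8, and offset = 4:

     mask = 8 - 1       = 7
     lsb  = 4 & -4      = 4
     mask &= lsb - 1    = 3
     dalign = mask + 1  = 4

   so a 4-byte access (dsize = 4) is provably within an aligned 4-byte
   unit and cannot straddle a 32k boundary, while an 8-byte access at
   the same offset would be rejected.  */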
8373 static bool
8374 constant_pool_expr_p (rtx op)
8376 rtx base, offset;
8378 split_const (op, &base, &offset);
8379 return (GET_CODE (base) == SYMBOL_REF
8380 && CONSTANT_POOL_ADDRESS_P (base)
8381 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8384 /* These are only used to pass through from print_operand/print_operand_address
8385 to rs6000_output_addr_const_extra over the intervening function
8386 output_addr_const which is not target code. */
8387 static const_rtx tocrel_base_oac, tocrel_offset_oac;
8389 /* Return true if OP is a toc pointer relative address (the output
8390 of create_TOC_reference). If STRICT, do not match non-split
8391 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8392 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8393 TOCREL_OFFSET_RET respectively. */
8395 bool
8396 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
8397 const_rtx *tocrel_offset_ret)
8399 if (!TARGET_TOC)
8400 return false;
8402 if (TARGET_CMODEL != CMODEL_SMALL)
8404 /* When strict, ensure we have everything tidy.  */
8405 if (strict
8406 && !(GET_CODE (op) == LO_SUM
8407 && REG_P (XEXP (op, 0))
8408 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8409 return false;
8411 /* When not strict, allow non-split TOC addresses and also allow
8412 (lo_sum (high ..)) TOC addresses created during reload. */
8413 if (GET_CODE (op) == LO_SUM)
8414 op = XEXP (op, 1);
8417 const_rtx tocrel_base = op;
8418 const_rtx tocrel_offset = const0_rtx;
8420 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8422 tocrel_base = XEXP (op, 0);
8423 tocrel_offset = XEXP (op, 1);
8426 if (tocrel_base_ret)
8427 *tocrel_base_ret = tocrel_base;
8428 if (tocrel_offset_ret)
8429 *tocrel_offset_ret = tocrel_offset;
8431 return (GET_CODE (tocrel_base) == UNSPEC
8432 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
8435 /* Return true if X is a constant pool address, and also for cmodel=medium
8436 if X is a toc-relative address known to be offsettable within MODE. */
8438 bool
8439 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8440 bool strict)
8442 const_rtx tocrel_base, tocrel_offset;
8443 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
8444 && (TARGET_CMODEL != CMODEL_MEDIUM
8445 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8446 || mode == QImode
8447 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8448 INTVAL (tocrel_offset), mode)));
8451 static bool
8452 legitimate_small_data_p (machine_mode mode, rtx x)
8454 return (DEFAULT_ABI == ABI_V4
8455 && !flag_pic && !TARGET_TOC
8456 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8457 && small_data_operand (x, mode));
8460 bool
8461 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8462 bool strict, bool worst_case)
8464 unsigned HOST_WIDE_INT offset;
8465 unsigned int extra;
8467 if (GET_CODE (x) != PLUS)
8468 return false;
8469 if (!REG_P (XEXP (x, 0)))
8470 return false;
8471 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8472 return false;
8473 if (mode_supports_vsx_dform_quad (mode))
8474 return quad_address_p (x, mode, strict);
8475 if (!reg_offset_addressing_ok_p (mode))
8476 return virtual_stack_registers_memory_p (x);
8477 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8478 return true;
8479 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8480 return false;
8482 offset = INTVAL (XEXP (x, 1));
8483 extra = 0;
8484 switch (mode)
8486 case E_V2SImode:
8487 case E_V2SFmode:
8488 /* Paired single modes: offset addressing isn't valid. */
8489 return false;
8491 case E_DFmode:
8492 case E_DDmode:
8493 case E_DImode:
8494 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8495 addressing. */
8496 if (VECTOR_MEM_VSX_P (mode))
8497 return false;
8499 if (!worst_case)
8500 break;
8501 if (!TARGET_POWERPC64)
8502 extra = 4;
8503 else if (offset & 3)
8504 return false;
8505 break;
8507 case E_TFmode:
8508 case E_IFmode:
8509 case E_KFmode:
8510 case E_TDmode:
8511 case E_TImode:
8512 case E_PTImode:
8513 extra = 8;
8514 if (!worst_case)
8515 break;
8516 if (!TARGET_POWERPC64)
8517 extra = 12;
8518 else if (offset & 3)
8519 return false;
8520 break;
8522 default:
8523 break;
8526 offset += 0x8000;
8527 return offset < 0x10000 - extra;
8530 bool
8531 legitimate_indexed_address_p (rtx x, int strict)
8533 rtx op0, op1;
8535 if (GET_CODE (x) != PLUS)
8536 return false;
8538 op0 = XEXP (x, 0);
8539 op1 = XEXP (x, 1);
8541 return (REG_P (op0) && REG_P (op1)
8542 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8543 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8544 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8545 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8548 bool
8549 avoiding_indexed_address_p (machine_mode mode)
8551 /* Avoid indexed addressing for modes that have non-indexed
8552 load/store instruction forms. */
8553 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8556 bool
8557 legitimate_indirect_address_p (rtx x, int strict)
8559 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8562 bool
8563 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8565 if (!TARGET_MACHO || !flag_pic
8566 || mode != SImode || GET_CODE (x) != MEM)
8567 return false;
8568 x = XEXP (x, 0);
8570 if (GET_CODE (x) != LO_SUM)
8571 return false;
8572 if (GET_CODE (XEXP (x, 0)) != REG)
8573 return false;
8574 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8575 return false;
8576 x = XEXP (x, 1);
8578 return CONSTANT_P (x);
8581 static bool
8582 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8584 if (GET_CODE (x) != LO_SUM)
8585 return false;
8586 if (GET_CODE (XEXP (x, 0)) != REG)
8587 return false;
8588 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8589 return false;
8590 /* Quad word addresses are restricted, and we can't use LO_SUM.  */
8591 if (mode_supports_vsx_dform_quad (mode))
8592 return false;
8593 x = XEXP (x, 1);
8595 if (TARGET_ELF || TARGET_MACHO)
8597 bool large_toc_ok;
8599 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8600 return false;
8601 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8602 push_reload from reload pass code.  LEGITIMIZE_RELOAD_ADDRESS
8603 recognizes some LO_SUM addresses as valid although this
8604 function says the opposite.  In most cases LRA, through its
8605 various transformations, can generate correct code for address
8606 reloads; only some LO_SUM cases defeat it.  So we need code
8607 here, analogous to the LO_SUM handling in
8608 rs6000_legitimize_reload_address, saying that such addresses are still valid.  */
8609 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8610 && small_toc_ref (x, VOIDmode));
8611 if (TARGET_TOC && ! large_toc_ok)
8612 return false;
8613 if (GET_MODE_NUNITS (mode) != 1)
8614 return false;
8615 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8616 && !(/* ??? Assume floating point reg based on mode? */
8617 TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
8618 && (mode == DFmode || mode == DDmode)))
8619 return false;
8621 return CONSTANT_P (x) || large_toc_ok;
8624 return false;
8628 /* Try machine-dependent ways of modifying an illegitimate address
8629 to be legitimate. If we find one, return the new, valid address.
8630 This is used from only one place: `memory_address' in explow.c.
8632 OLDX is the address as it was before break_out_memory_refs was
8633 called. In some cases it is useful to look at this to decide what
8634 needs to be done.
8636 It is always safe for this function to do nothing. It exists to
8637 recognize opportunities to optimize the output.
8639 On RS/6000, first check for the sum of a register with a constant
8640 integer that is out of range. If so, generate code to add the
8641 constant with the low-order 16 bits masked to the register and force
8642 this result into another register (this can be done with `cau').
8643 Then generate an address of REG+(CONST&0xffff), allowing for the
8644 possibility of bit 16 being a one.
8646 Then check for the sum of a register and something not constant, try to
8647 load the other things into a register and return the sum. */
8649 static rtx
8650 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8651 machine_mode mode)
8653 unsigned int extra;
8655 if (!reg_offset_addressing_ok_p (mode)
8656 || mode_supports_vsx_dform_quad (mode))
8658 if (virtual_stack_registers_memory_p (x))
8659 return x;
8661 /* In theory we should not be seeing addresses of the form reg+0,
8662 but just in case it is generated, optimize it away. */
8663 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8664 return force_reg (Pmode, XEXP (x, 0));
8666 /* For TImode with load/store quad, restrict addresses to just a single
8667 pointer, so it works with both GPRs and VSX registers. */
8668 /* Make sure both operands are registers. */
8669 else if (GET_CODE (x) == PLUS
8670 && (mode != TImode || !TARGET_VSX))
8671 return gen_rtx_PLUS (Pmode,
8672 force_reg (Pmode, XEXP (x, 0)),
8673 force_reg (Pmode, XEXP (x, 1)));
8674 else
8675 return force_reg (Pmode, x);
8677 if (GET_CODE (x) == SYMBOL_REF)
8679 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8680 if (model != 0)
8681 return rs6000_legitimize_tls_address (x, model);
8684 extra = 0;
8685 switch (mode)
8687 case E_TFmode:
8688 case E_TDmode:
8689 case E_TImode:
8690 case E_PTImode:
8691 case E_IFmode:
8692 case E_KFmode:
8693 /* As in legitimate_offset_address_p we do not assume
8694 worst-case. The mode here is just a hint as to the registers
8695 used. A TImode is usually in gprs, but may actually be in
8696 fprs. Leave worst-case scenario for reload to handle via
8697 insn constraints. PTImode is only GPRs. */
8698 extra = 8;
8699 break;
8700 default:
8701 break;
8704 if (GET_CODE (x) == PLUS
8705 && GET_CODE (XEXP (x, 0)) == REG
8706 && GET_CODE (XEXP (x, 1)) == CONST_INT
8707 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8708 >= 0x10000 - extra)
8709 && !PAIRED_VECTOR_MODE (mode))
8711 HOST_WIDE_INT high_int, low_int;
8712 rtx sum;
8713 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8714 if (low_int >= 0x8000 - extra)
8715 low_int = 0;
8716 high_int = INTVAL (XEXP (x, 1)) - low_int;
8717 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8718 GEN_INT (high_int)), 0);
8719 return plus_constant (Pmode, sum, low_int);
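/* Worked example (illustrative, not from the original source): for
   x = (plus r9 0x9000) the displacement is out of D-form range, so
   the split above computes

     low_int  = ((0x9000 & 0xffff) ^ 0x8000) - 0x8000 = -0x7000
     high_int = 0x9000 - (-0x7000)                    =  0x10000

   high_int is added first (a single addis), leaving the in-range
   displacement -0x7000 on the final address.  */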
8721 else if (GET_CODE (x) == PLUS
8722 && GET_CODE (XEXP (x, 0)) == REG
8723 && GET_CODE (XEXP (x, 1)) != CONST_INT
8724 && GET_MODE_NUNITS (mode) == 1
8725 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8726 || (/* ??? Assume floating point reg based on mode? */
8727 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8728 && (mode == DFmode || mode == DDmode)))
8729 && !avoiding_indexed_address_p (mode))
8731 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8732 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8734 else if (PAIRED_VECTOR_MODE (mode))
8736 if (mode == DImode)
8737 return x;
8738 /* We accept [reg + reg]. */
8740 if (GET_CODE (x) == PLUS)
8742 rtx op1 = XEXP (x, 0);
8743 rtx op2 = XEXP (x, 1);
8744 rtx y;
8746 op1 = force_reg (Pmode, op1);
8747 op2 = force_reg (Pmode, op2);
8749 /* We can't always do [reg + reg] for these, because [reg +
8750 reg + offset] is not a legitimate addressing mode. */
8751 y = gen_rtx_PLUS (Pmode, op1, op2);
8753 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
8754 return force_reg (Pmode, y);
8755 else
8756 return y;
8759 return force_reg (Pmode, x);
8761 else if ((TARGET_ELF
8762 #if TARGET_MACHO
8763 || !MACHO_DYNAMIC_NO_PIC_P
8764 #endif
8766 && TARGET_32BIT
8767 && TARGET_NO_TOC
8768 && ! flag_pic
8769 && GET_CODE (x) != CONST_INT
8770 && GET_CODE (x) != CONST_WIDE_INT
8771 && GET_CODE (x) != CONST_DOUBLE
8772 && CONSTANT_P (x)
8773 && GET_MODE_NUNITS (mode) == 1
8774 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8775 || (/* ??? Assume floating point reg based on mode? */
8776 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8777 && (mode == DFmode || mode == DDmode))))
8779 rtx reg = gen_reg_rtx (Pmode);
8780 if (TARGET_ELF)
8781 emit_insn (gen_elf_high (reg, x));
8782 else
8783 emit_insn (gen_macho_high (reg, x));
8784 return gen_rtx_LO_SUM (Pmode, reg, x);
8786 else if (TARGET_TOC
8787 && GET_CODE (x) == SYMBOL_REF
8788 && constant_pool_expr_p (x)
8789 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8790 return create_TOC_reference (x, NULL_RTX);
8791 else
8792 return x;
8795 /* Debug version of rs6000_legitimize_address. */
8796 static rtx
8797 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8799 rtx ret;
8800 rtx_insn *insns;
8802 start_sequence ();
8803 ret = rs6000_legitimize_address (x, oldx, mode);
8804 insns = get_insns ();
8805 end_sequence ();
8807 if (ret != x)
8809 fprintf (stderr,
8810 "\nrs6000_legitimize_address: mode %s, old code %s, "
8811 "new code %s, modified\n",
8812 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8813 GET_RTX_NAME (GET_CODE (ret)));
8815 fprintf (stderr, "Original address:\n");
8816 debug_rtx (x);
8818 fprintf (stderr, "oldx:\n");
8819 debug_rtx (oldx);
8821 fprintf (stderr, "New address:\n");
8822 debug_rtx (ret);
8824 if (insns)
8826 fprintf (stderr, "Insns added:\n");
8827 debug_rtx_list (insns, 20);
8830 else
8832 fprintf (stderr,
8833 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8834 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8836 debug_rtx (x);
8839 if (insns)
8840 emit_insn (insns);
8842 return ret;
8845 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8846 We need to emit DTP-relative relocations. */
8848 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8849 static void
8850 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8852 switch (size)
8854 case 4:
8855 fputs ("\t.long\t", file);
8856 break;
8857 case 8:
8858 fputs (DOUBLE_INT_ASM_OP, file);
8859 break;
8860 default:
8861 gcc_unreachable ();
8863 output_addr_const (file, x);
8864 if (TARGET_ELF)
8865 fputs ("@dtprel+0x8000", file);
8866 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8868 switch (SYMBOL_REF_TLS_MODEL (x))
8870 case 0:
8871 break;
8872 case TLS_MODEL_LOCAL_EXEC:
8873 fputs ("@le", file);
8874 break;
8875 case TLS_MODEL_INITIAL_EXEC:
8876 fputs ("@ie", file);
8877 break;
8878 case TLS_MODEL_GLOBAL_DYNAMIC:
8879 case TLS_MODEL_LOCAL_DYNAMIC:
8880 fputs ("@m", file);
8881 break;
8882 default:
8883 gcc_unreachable ();
8888 /* Return true if X is a symbol that refers to real (rather than emulated)
8889 TLS. */
8891 static bool
8892 rs6000_real_tls_symbol_ref_p (rtx x)
8894 return (GET_CODE (x) == SYMBOL_REF
8895 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8898 /* In the name of slightly smaller debug output, and to cater to
8899 general assembler lossage, recognize various UNSPEC sequences
8900 and turn them back into a direct symbol reference. */
8902 static rtx
8903 rs6000_delegitimize_address (rtx orig_x)
8905 rtx x, y, offset;
8907 orig_x = delegitimize_mem_from_attrs (orig_x);
8908 x = orig_x;
8909 if (MEM_P (x))
8910 x = XEXP (x, 0);
8912 y = x;
8913 if (TARGET_CMODEL != CMODEL_SMALL
8914 && GET_CODE (y) == LO_SUM)
8915 y = XEXP (y, 1);
8917 offset = NULL_RTX;
8918 if (GET_CODE (y) == PLUS
8919 && GET_MODE (y) == Pmode
8920 && CONST_INT_P (XEXP (y, 1)))
8922 offset = XEXP (y, 1);
8923 y = XEXP (y, 0);
8926 if (GET_CODE (y) == UNSPEC
8927 && XINT (y, 1) == UNSPEC_TOCREL)
8929 y = XVECEXP (y, 0, 0);
8931 #ifdef HAVE_AS_TLS
8932 /* Do not associate thread-local symbols with the original
8933 constant pool symbol. */
8934 if (TARGET_XCOFF
8935 && GET_CODE (y) == SYMBOL_REF
8936 && CONSTANT_POOL_ADDRESS_P (y)
8937 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8938 return orig_x;
8939 #endif
8941 if (offset != NULL_RTX)
8942 y = gen_rtx_PLUS (Pmode, y, offset);
8943 if (!MEM_P (orig_x))
8944 return y;
8945 else
8946 return replace_equiv_address_nv (orig_x, y);
8949 if (TARGET_MACHO
8950 && GET_CODE (orig_x) == LO_SUM
8951 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8953 y = XEXP (XEXP (orig_x, 1), 0);
8954 if (GET_CODE (y) == UNSPEC
8955 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8956 return XVECEXP (y, 0, 0);
8959 return orig_x;
8962 /* Return true if X shouldn't be emitted into the debug info.
8963 The linker doesn't like .toc section references from
8964 .debug_* sections, so reject .toc section symbols. */
8966 static bool
8967 rs6000_const_not_ok_for_debug_p (rtx x)
8969 if (GET_CODE (x) == SYMBOL_REF
8970 && CONSTANT_POOL_ADDRESS_P (x))
8972 rtx c = get_pool_constant (x);
8973 machine_mode cmode = get_pool_mode (x);
8974 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8975 return true;
8978 return false;
8982 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8984 static bool
8985 rs6000_legitimate_combined_insn (rtx_insn *insn)
8987 int icode = INSN_CODE (insn);
8989 /* Reject creating doloop insns. Combine should not be allowed
8990 to create these for a number of reasons:
8991 1) In a nested loop, if combine creates one of these in an
8992 outer loop and the register allocator happens to allocate ctr
8993 to the outer loop insn, then the inner loop can't use ctr.
8994 Inner loops ought to be more highly optimized.
8995 2) Combine often wants to create one of these from what was
8996 originally a three insn sequence, first combining the three
8997 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8998 allocated ctr, the splitter takes us back to the three insn
8999 sequence. It's better to stop combine at the two insn
9000 sequence.
9001 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
9002 insns, the register allocator sometimes uses floating point
9003 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9004 jump insn and output reloads are not implemented for jumps,
9005 the ctrsi/ctrdi splitters need to handle all possible cases.
9006 That's a pain, and it gets to be seriously difficult when a
9007 splitter that runs after reload needs memory to transfer from
9008 a gpr to fpr. See PR70098 and PR71763 which are not fixed
9009 for the difficult case. It's better to not create problems
9010 in the first place. */
9011 if (icode != CODE_FOR_nothing
9012 && (icode == CODE_FOR_ctrsi_internal1
9013 || icode == CODE_FOR_ctrdi_internal1
9014 || icode == CODE_FOR_ctrsi_internal2
9015 || icode == CODE_FOR_ctrdi_internal2
9016 || icode == CODE_FOR_ctrsi_internal3
9017 || icode == CODE_FOR_ctrdi_internal3
9018 || icode == CODE_FOR_ctrsi_internal4
9019 || icode == CODE_FOR_ctrdi_internal4))
9020 return false;
9022 return true;
9025 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9027 static GTY(()) rtx rs6000_tls_symbol;
9028 static rtx
9029 rs6000_tls_get_addr (void)
9031 if (!rs6000_tls_symbol)
9032 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
9034 return rs6000_tls_symbol;
9037 /* Construct the SYMBOL_REF for TLS GOT references. */
9039 static GTY(()) rtx rs6000_got_symbol;
9040 static rtx
9041 rs6000_got_sym (void)
9043 if (!rs6000_got_symbol)
9045 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9046 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
9047 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
9050 return rs6000_got_symbol;
9053 /* AIX Thread-Local Address support. */
9055 static rtx
9056 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
9058 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
9059 const char *name;
9060 char *tlsname;
9062 name = XSTR (addr, 0);
9063 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
9064 or the symbol will be placed in the TLS private data section.  */
9065 if (name[strlen (name) - 1] != ']'
9066 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
9067 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
9069 tlsname = XALLOCAVEC (char, strlen (name) + 4);
9070 strcpy (tlsname, name);
9071 strcat (tlsname,
9072 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
9073 tlsaddr = copy_rtx (addr);
9074 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
9076 else
9077 tlsaddr = addr;
9079 /* Place addr into TOC constant pool. */
9080 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
9082 /* Output the TOC entry and create the MEM referencing the value. */
9083 if (constant_pool_expr_p (XEXP (sym, 0))
9084 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
9086 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
9087 mem = gen_const_mem (Pmode, tocref);
9088 set_mem_alias_set (mem, get_TOC_alias_set ());
9090 else
9091 return sym;
9093 /* Use global-dynamic for local-dynamic. */
9094 if (model == TLS_MODEL_GLOBAL_DYNAMIC
9095 || model == TLS_MODEL_LOCAL_DYNAMIC)
9097 /* Create new TOC reference for @m symbol. */
9098 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
9099 tlsname = XALLOCAVEC (char, strlen (name) + 1);
9100 strcpy (tlsname, "*LCM");
9101 strcat (tlsname, name + 3);
9102 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
9103 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
9104 tocref = create_TOC_reference (modaddr, NULL_RTX);
9105 rtx modmem = gen_const_mem (Pmode, tocref);
9106 set_mem_alias_set (modmem, get_TOC_alias_set ());
9108 rtx modreg = gen_reg_rtx (Pmode);
9109 emit_insn (gen_rtx_SET (modreg, modmem));
9111 tmpreg = gen_reg_rtx (Pmode);
9112 emit_insn (gen_rtx_SET (tmpreg, mem));
9114 dest = gen_reg_rtx (Pmode);
9115 if (TARGET_32BIT)
9116 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
9117 else
9118 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
9119 return dest;
9121 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
9122 else if (TARGET_32BIT)
9124 tlsreg = gen_reg_rtx (SImode);
9125 emit_insn (gen_tls_get_tpointer (tlsreg));
9127 else
9128 tlsreg = gen_rtx_REG (DImode, 13);
9130 /* Load the TOC value into a temporary register.  */
9131 tmpreg = gen_reg_rtx (Pmode);
9132 emit_insn (gen_rtx_SET (tmpreg, mem));
9133 set_unique_reg_note (get_last_insn (), REG_EQUAL,
9134 gen_rtx_MINUS (Pmode, addr, tlsreg));
9136 /* Add TOC symbol value to TLS pointer. */
9137 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
9139 return dest;
9142 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9143 this (thread-local) address. */
9145 static rtx
9146 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
9148 rtx dest, insn;
9150 if (TARGET_XCOFF)
9151 return rs6000_legitimize_tls_address_aix (addr, model);
9153 dest = gen_reg_rtx (Pmode);
9154 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
9156 rtx tlsreg;
9158 if (TARGET_64BIT)
9160 tlsreg = gen_rtx_REG (Pmode, 13);
9161 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
9163 else
9165 tlsreg = gen_rtx_REG (Pmode, 2);
9166 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
9168 emit_insn (insn);
9170 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
9172 rtx tlsreg, tmp;
9174 tmp = gen_reg_rtx (Pmode);
9175 if (TARGET_64BIT)
9177 tlsreg = gen_rtx_REG (Pmode, 13);
9178 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
9180 else
9182 tlsreg = gen_rtx_REG (Pmode, 2);
9183 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
9185 emit_insn (insn);
9186 if (TARGET_64BIT)
9187 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
9188 else
9189 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
9190 emit_insn (insn);
9192 else
9194 rtx r3, got, tga, tmp1, tmp2, call_insn;
9196 /* We currently use relocations like @got@tlsgd for TLS, which
9197 means the linker will handle allocation of tls entries, placing
9198 them in the .got section. So use a pointer to the .got section,
9199 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9200 or to secondary GOT sections used by 32-bit -fPIC. */
9201 if (TARGET_64BIT)
9202 got = gen_rtx_REG (Pmode, 2);
9203 else
9205 if (flag_pic == 1)
9206 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
9207 else
9209 rtx gsym = rs6000_got_sym ();
9210 got = gen_reg_rtx (Pmode);
9211 if (flag_pic == 0)
9212 rs6000_emit_move (got, gsym, Pmode);
9213 else
9215 rtx mem, lab;
9217 tmp1 = gen_reg_rtx (Pmode);
9218 tmp2 = gen_reg_rtx (Pmode);
9219 mem = gen_const_mem (Pmode, tmp1);
9220 lab = gen_label_rtx ();
9221 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9222 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9223 if (TARGET_LINK_STACK)
9224 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9225 emit_move_insn (tmp2, mem);
9226 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9227 set_unique_reg_note (last, REG_EQUAL, gsym);
9232 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9234 tga = rs6000_tls_get_addr ();
9235 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9236 const0_rtx, Pmode);
9238 r3 = gen_rtx_REG (Pmode, 3);
9239 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9241 if (TARGET_64BIT)
9242 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9243 else
9244 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9246 else if (DEFAULT_ABI == ABI_V4)
9247 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9248 else
9249 gcc_unreachable ();
9250 call_insn = last_call_insn ();
9251 PATTERN (call_insn) = insn;
9252 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9253 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9254 pic_offset_table_rtx);
9256 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9258 tga = rs6000_tls_get_addr ();
9259 tmp1 = gen_reg_rtx (Pmode);
9260 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9261 const0_rtx, Pmode);
9263 r3 = gen_rtx_REG (Pmode, 3);
9264 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9266 if (TARGET_64BIT)
9267 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9268 else
9269 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9271 else if (DEFAULT_ABI == ABI_V4)
9272 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9273 else
9274 gcc_unreachable ();
9275 call_insn = last_call_insn ();
9276 PATTERN (call_insn) = insn;
9277 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9278 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9279 pic_offset_table_rtx);
9281 if (rs6000_tls_size == 16)
9283 if (TARGET_64BIT)
9284 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9285 else
9286 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9288 else if (rs6000_tls_size == 32)
9290 tmp2 = gen_reg_rtx (Pmode);
9291 if (TARGET_64BIT)
9292 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9293 else
9294 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9295 emit_insn (insn);
9296 if (TARGET_64BIT)
9297 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9298 else
9299 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9301 else
9303 tmp2 = gen_reg_rtx (Pmode);
9304 if (TARGET_64BIT)
9305 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9306 else
9307 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9308 emit_insn (insn);
9309 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9311 emit_insn (insn);
9313 else
9315 /* IE, or 64-bit offset LE. */
9316 tmp2 = gen_reg_rtx (Pmode);
9317 if (TARGET_64BIT)
9318 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9319 else
9320 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9321 emit_insn (insn);
9322 if (TARGET_64BIT)
9323 insn = gen_tls_tls_64 (dest, tmp2, addr);
9324 else
9325 insn = gen_tls_tls_32 (dest, tmp2, addr);
9326 emit_insn (insn);
9330 return dest;
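/* Illustrative sketches of the ELF sequences emitted above; exact
   mnemonics vary with the ABI and -mtls-size, so treat these as
   examples only:

     local-exec, -mtls-size=16:   addi  dest, r13, x@tprel
     local-exec, -mtls-size=32:   addis tmp, r13, x@tprel@ha
                                  addi  dest, tmp, x@tprel@l
     global-dynamic (64-bit):     addi  r3, r2, x@got@tlsgd
                                  bl    __tls_get_addr(x@tlsgd)
     initial-exec (64-bit):       ld    tmp, x@got@tprel(r2)
                                  add   dest, tmp, x@tls

   where r13 (r2 for 32-bit SVR4) holds the thread pointer.  */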
9333 /* Only create the global variable for the stack protect guard if we are using
9334 the global flavor of that guard. */
9335 static tree
9336 rs6000_init_stack_protect_guard (void)
9338 if (rs6000_stack_protector_guard == SSP_GLOBAL)
9339 return default_stack_protect_guard ();
9341 return NULL_TREE;
9344 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9346 static bool
9347 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9349 if (GET_CODE (x) == HIGH
9350 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9351 return true;
9353 /* A TLS symbol in the TOC cannot contain a sum. */
9354 if (GET_CODE (x) == CONST
9355 && GET_CODE (XEXP (x, 0)) == PLUS
9356 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9357 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9358 return true;
9360 /* Do not place an ELF TLS symbol in the constant pool. */
9361 return TARGET_ELF && tls_referenced_p (x);
9364 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9365 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9366 can be addressed relative to the TOC pointer. */
9368 static bool
9369 use_toc_relative_ref (rtx sym, machine_mode mode)
9371 return ((constant_pool_expr_p (sym)
9372 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9373 get_pool_mode (sym)))
9374 || (TARGET_CMODEL == CMODEL_MEDIUM
9375 && SYMBOL_REF_LOCAL_P (sym)
9376 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
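/* Sketch of what such a reference becomes (illustrative): with
   -mcmodel=medium a qualifying symbol is addressed relative to the
   TOC pointer in r2, e.g.

       addis r9, r2, sym@toc@ha
       lwz   r3, sym@toc@l(r9)

   while with -mcmodel=small a constant pool entry is reached with a
   single d-form access off r2.  */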
9379 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9380 replace the input X, or the original X if no replacement is called for.
9381 The output parameter *WIN is 1 if the calling macro should goto WIN,
9382 0 if it should not.
9384 For RS/6000, we wish to handle large displacements off a base
9385 register by splitting the addend across an addi/addis pair and the mem insn.
9386 This cuts the number of extra insns needed from 3 to 1.
9388 On Darwin, we use this to generate code for floating point constants.
9389 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9390 The Darwin code is inside #if TARGET_MACHO because only then are the
9391 machopic_* functions defined. */
9392 static rtx
9393 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9394 int opnum, int type,
9395 int ind_levels ATTRIBUTE_UNUSED, int *win)
9397 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9398 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9400 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9401 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9402 if (reg_offset_p
9403 && opnum == 1
9404 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9405 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9406 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9407 && TARGET_P9_VECTOR)
9408 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9409 && TARGET_P9_VECTOR)))
9410 reg_offset_p = false;
9412 /* We must recognize output that we have already generated ourselves. */
9413 if (GET_CODE (x) == PLUS
9414 && GET_CODE (XEXP (x, 0)) == PLUS
9415 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9416 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9417 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9419 if (TARGET_DEBUG_ADDR)
9421 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9422 debug_rtx (x);
9424 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9425 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9426 opnum, (enum reload_type) type);
9427 *win = 1;
9428 return x;
9431 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9432 if (GET_CODE (x) == LO_SUM
9433 && GET_CODE (XEXP (x, 0)) == HIGH)
9435 if (TARGET_DEBUG_ADDR)
9437 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9438 debug_rtx (x);
9440 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9441 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9442 opnum, (enum reload_type) type);
9443 *win = 1;
9444 return x;
9447 #if TARGET_MACHO
9448 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9449 && GET_CODE (x) == LO_SUM
9450 && GET_CODE (XEXP (x, 0)) == PLUS
9451 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9452 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9453 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9454 && machopic_operand_p (XEXP (x, 1)))
9456 /* Result of previous invocation of this function on Darwin
9457 floating point constant. */
9458 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9459 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9460 opnum, (enum reload_type) type);
9461 *win = 1;
9462 return x;
9464 #endif
9466 if (TARGET_CMODEL != CMODEL_SMALL
9467 && reg_offset_p
9468 && !quad_offset_p
9469 && small_toc_ref (x, VOIDmode))
9471 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9472 x = gen_rtx_LO_SUM (Pmode, hi, x);
9473 if (TARGET_DEBUG_ADDR)
9475 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9476 debug_rtx (x);
9478 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9479 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9480 opnum, (enum reload_type) type);
9481 *win = 1;
9482 return x;
9485 if (GET_CODE (x) == PLUS
9486 && REG_P (XEXP (x, 0))
9487 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9488 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9489 && CONST_INT_P (XEXP (x, 1))
9490 && reg_offset_p
9491 && !PAIRED_VECTOR_MODE (mode)
9492 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9494 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9495 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9496 HOST_WIDE_INT high
9497 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
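/* Worked example of the split above (illustrative): val = 0x12345678
   gives low = 0x5678 and high = 0x12340000, so high + low == val.
   For val = 0x8000 the xor/subtract trick sign-extends the low part:
   low = -0x8000 and high = 0x10000, keeping low within the signed
   16-bit range of a d-form displacement.  */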
9499 /* Check for 32-bit overflow or quad addresses with one of the
9500 four least significant bits set. */
9501 if (high + low != val
9502 || (quad_offset_p && (low & 0xf)))
9504 *win = 0;
9505 return x;
9508 /* Reload the high part into a base reg; leave the low part
9509 in the mem directly. */
9511 x = gen_rtx_PLUS (GET_MODE (x),
9512 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9513 GEN_INT (high)),
9514 GEN_INT (low));
9516 if (TARGET_DEBUG_ADDR)
9518 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9519 debug_rtx (x);
9521 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9522 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9523 opnum, (enum reload_type) type);
9524 *win = 1;
9525 return x;
9528 if (GET_CODE (x) == SYMBOL_REF
9529 && reg_offset_p
9530 && !quad_offset_p
9531 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9532 && !PAIRED_VECTOR_MODE (mode)
9533 #if TARGET_MACHO
9534 && DEFAULT_ABI == ABI_DARWIN
9535 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9536 && machopic_symbol_defined_p (x)
9537 #else
9538 && DEFAULT_ABI == ABI_V4
9539 && !flag_pic
9540 #endif
9541 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9542 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9543 without fprs.
9544 ??? Assume floating point reg based on mode? This assumption is
9545 violated by e.g. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9546 where reload ends up doing a DFmode load of a constant from
9547 mem using two gprs. Unfortunately, at this point reload
9548 hasn't yet selected regs so poking around in reload data
9549 won't help and even if we could figure out the regs reliably,
9550 we'd still want to allow this transformation when the mem is
9551 naturally aligned. Since we say the address is good here, we
9552 can't disable offsets from LO_SUMs in mem_operand_gpr.
9553 FIXME: Allow offset from lo_sum for other modes too, when
9554 mem is sufficiently aligned.
9556 Also disallow this if the type can go in VMX/Altivec registers, since
9557 those registers do not have d-form (reg+offset) address modes. */
9558 && !reg_addr[mode].scalar_in_vmx_p
9559 && mode != TFmode
9560 && mode != TDmode
9561 && mode != IFmode
9562 && mode != KFmode
9563 && (mode != TImode || !TARGET_VSX)
9564 && mode != PTImode
9565 && (mode != DImode || TARGET_POWERPC64)
9566 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9567 || (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)))
9569 #if TARGET_MACHO
9570 if (flag_pic)
9572 rtx offset = machopic_gen_offset (x);
9573 x = gen_rtx_LO_SUM (GET_MODE (x),
9574 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9575 gen_rtx_HIGH (Pmode, offset)), offset);
9577 else
9578 #endif
9579 x = gen_rtx_LO_SUM (GET_MODE (x),
9580 gen_rtx_HIGH (Pmode, x), x);
9582 if (TARGET_DEBUG_ADDR)
9584 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9585 debug_rtx (x);
9587 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9588 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9589 opnum, (enum reload_type) type);
9590 *win = 1;
9591 return x;
9594 /* Reload an offset address wrapped by an AND that represents the
9595 masking of the lower bits. Strip the outer AND and let reload
9596 convert the offset address into an indirect address. For VSX,
9597 force reload to create the address with an AND in a separate
9598 register, because we can't guarantee an altivec register will
9599 be used. */
9600 if (VECTOR_MEM_ALTIVEC_P (mode)
9601 && GET_CODE (x) == AND
9602 && GET_CODE (XEXP (x, 0)) == PLUS
9603 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9604 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9605 && GET_CODE (XEXP (x, 1)) == CONST_INT
9606 && INTVAL (XEXP (x, 1)) == -16)
9608 x = XEXP (x, 0);
9609 *win = 1;
9610 return x;
9613 if (TARGET_TOC
9614 && reg_offset_p
9615 && !quad_offset_p
9616 && GET_CODE (x) == SYMBOL_REF
9617 && use_toc_relative_ref (x, mode))
9619 x = create_TOC_reference (x, NULL_RTX);
9620 if (TARGET_CMODEL != CMODEL_SMALL)
9622 if (TARGET_DEBUG_ADDR)
9624 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9625 debug_rtx (x);
9627 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9628 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9629 opnum, (enum reload_type) type);
9631 *win = 1;
9632 return x;
9634 *win = 0;
9635 return x;
9638 /* Debug version of rs6000_legitimize_reload_address. */
9639 static rtx
9640 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9641 int opnum, int type,
9642 int ind_levels, int *win)
9644 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9645 ind_levels, win);
9646 fprintf (stderr,
9647 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9648 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9649 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9650 debug_rtx (x);
9652 if (x == ret)
9653 fprintf (stderr, "Same address returned\n");
9654 else if (!ret)
9655 fprintf (stderr, "NULL returned\n");
9656 else
9658 fprintf (stderr, "New address:\n");
9659 debug_rtx (ret);
9662 return ret;
9665 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9666 that is a valid memory address for an instruction.
9667 The MODE argument is the machine mode for the MEM expression
9668 that wants to use this address.
9670 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9671 refers to a constant pool entry of an address (or the sum of it
9672 plus a constant), a short (16-bit signed) constant plus a register,
9673 the sum of two registers, or a register indirect, possibly with an
9674 auto-increment. For DFmode, DDmode and DImode with a constant plus
9675 register, we must ensure that both words are addressable or PowerPC64
9676 with offset word aligned.
9678 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9679 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9680 because adjacent memory cells are accessed by adding word-sized offsets
9681 during assembly output. */
9682 static bool
9683 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9685 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9686 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9688 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9689 if (VECTOR_MEM_ALTIVEC_P (mode)
9690 && GET_CODE (x) == AND
9691 && GET_CODE (XEXP (x, 1)) == CONST_INT
9692 && INTVAL (XEXP (x, 1)) == -16)
9693 x = XEXP (x, 0);
9695 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9696 return 0;
9697 if (legitimate_indirect_address_p (x, reg_ok_strict))
9698 return 1;
9699 if (TARGET_UPDATE
9700 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9701 && mode_supports_pre_incdec_p (mode)
9702 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9703 return 1;
9704 /* Handle restricted vector d-form offsets in ISA 3.0. */
9705 if (quad_offset_p)
9707 if (quad_address_p (x, mode, reg_ok_strict))
9708 return 1;
9710 else if (virtual_stack_registers_memory_p (x))
9711 return 1;
9713 else if (reg_offset_p)
9715 if (legitimate_small_data_p (mode, x))
9716 return 1;
9717 if (legitimate_constant_pool_address_p (x, mode,
9718 reg_ok_strict || lra_in_progress))
9719 return 1;
9720 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
9721 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
9722 return 1;
9725 /* For TImode, if we have TImode in VSX registers, only allow register
9726 indirect addresses. This will allow the values to go in either GPRs
9727 or VSX registers without reloading. The vector types would tend to
9728 go into VSX registers, so we allow REG+REG, while TImode seems
9729 somewhat split, in that some uses are GPR based, and some VSX based. */
9730 /* FIXME: We could loosen this by changing the following to
9731 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9732 but currently we cannot allow REG+REG addressing for TImode. See
9733 PR72827 for complete details on how this ends up hoodwinking DSE. */
9734 if (mode == TImode && TARGET_VSX)
9735 return 0;
9736 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9737 if (! reg_ok_strict
9738 && reg_offset_p
9739 && GET_CODE (x) == PLUS
9740 && GET_CODE (XEXP (x, 0)) == REG
9741 && (XEXP (x, 0) == virtual_stack_vars_rtx
9742 || XEXP (x, 0) == arg_pointer_rtx)
9743 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9744 return 1;
9745 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9746 return 1;
9747 if (!FLOAT128_2REG_P (mode)
9748 && ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9749 || TARGET_POWERPC64
9750 || (mode != DFmode && mode != DDmode))
9751 && (TARGET_POWERPC64 || mode != DImode)
9752 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9753 && mode != PTImode
9754 && !avoiding_indexed_address_p (mode)
9755 && legitimate_indexed_address_p (x, reg_ok_strict))
9756 return 1;
9757 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9758 && mode_supports_pre_modify_p (mode)
9759 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9760 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9761 reg_ok_strict, false)
9762 || (!avoiding_indexed_address_p (mode)
9763 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9764 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9765 return 1;
9766 if (reg_offset_p && !quad_offset_p
9767 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9768 return 1;
9769 return 0;
9772 /* Debug version of rs6000_legitimate_address_p. */
9773 static bool
9774 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9775 bool reg_ok_strict)
9777 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9778 fprintf (stderr,
9779 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9780 "strict = %d, reload = %s, code = %s\n",
9781 ret ? "true" : "false",
9782 GET_MODE_NAME (mode),
9783 reg_ok_strict,
9784 (reload_completed ? "after" : "before"),
9785 GET_RTX_NAME (GET_CODE (x)));
9786 debug_rtx (x);
9788 return ret;
9791 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9793 static bool
9794 rs6000_mode_dependent_address_p (const_rtx addr,
9795 addr_space_t as ATTRIBUTE_UNUSED)
9797 return rs6000_mode_dependent_address_ptr (addr);
9800 /* Go to LABEL if ADDR (a legitimate address expression)
9801 has an effect that depends on the machine mode it is used for.
9803 On the RS/6000 this is true of all integral offsets (since AltiVec
9804 and VSX modes don't allow them) and of pre-increment or decrement addresses.
9806 ??? Except that due to conceptual problems in offsettable_address_p
9807 we can't really report the problems of integral offsets. So leave
9808 this assuming that the adjustable offset must be valid for the
9809 sub-words of a TFmode operand, which is what we had before. */
9811 static bool
9812 rs6000_mode_dependent_address (const_rtx addr)
9814 switch (GET_CODE (addr))
9816 case PLUS:
9817 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9818 is considered a legitimate address before reload, so there
9819 are no offset restrictions in that case. Note that this
9820 condition is safe in strict mode because any address involving
9821 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9822 been rejected as illegitimate. */
9823 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9824 && XEXP (addr, 0) != arg_pointer_rtx
9825 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9827 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9828 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9830 break;
9832 case LO_SUM:
9833 /* Anything in the constant pool is sufficiently aligned that
9834 all bytes have the same high part address. */
9835 return !legitimate_constant_pool_address_p (addr, QImode, false);
9837 /* Auto-increment cases are now treated generically in recog.c. */
9838 case PRE_MODIFY:
9839 return TARGET_UPDATE;
9841 /* AND is only allowed in Altivec loads. */
9842 case AND:
9843 return true;
9845 default:
9846 break;
9849 return false;
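/* Illustrative numbers for the PLUS case above: in 32-bit mode the
   threshold is 0x10000 - 12, so any offset of 32756 or more is
   flagged as mode dependent, because the last word of a TFmode
   access at offset + 12 would overflow a signed 16-bit displacement;
   64-bit mode only needs room for offset + 8.  */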
9852 /* Debug version of rs6000_mode_dependent_address. */
9853 static bool
9854 rs6000_debug_mode_dependent_address (const_rtx addr)
9856 bool ret = rs6000_mode_dependent_address (addr);
9858 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9859 ret ? "true" : "false");
9860 debug_rtx (addr);
9862 return ret;
9865 /* Implement FIND_BASE_TERM. */
9868 rs6000_find_base_term (rtx op)
9870 rtx base;
9872 base = op;
9873 if (GET_CODE (base) == CONST)
9874 base = XEXP (base, 0);
9875 if (GET_CODE (base) == PLUS)
9876 base = XEXP (base, 0);
9877 if (GET_CODE (base) == UNSPEC)
9878 switch (XINT (base, 1))
9880 case UNSPEC_TOCREL:
9881 case UNSPEC_MACHOPIC_OFFSET:
9882 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9883 for aliasing purposes. */
9884 return XVECEXP (base, 0, 0);
9887 return op;
9890 /* More elaborate version of recog's offsettable_memref_p predicate
9891 that works around the ??? note of rs6000_mode_dependent_address.
9892 In particular it accepts
9894 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9896 in 32-bit mode, which the recog predicate rejects. */
9898 static bool
9899 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
9901 bool worst_case;
9903 if (!MEM_P (op))
9904 return false;
9906 /* First mimic offsettable_memref_p. */
9907 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
9908 return true;
9910 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9911 the latter predicate knows nothing about the mode of the memory
9912 reference and, therefore, assumes that it is the largest supported
9913 mode (TFmode). As a consequence, legitimate offsettable memory
9914 references are rejected. rs6000_legitimate_offset_address_p contains
9915 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9916 at least with a little bit of help here given that we know the
9917 actual registers used. */
9918 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9919 || GET_MODE_SIZE (reg_mode) == 4);
9920 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9921 true, worst_case);
9924 /* Determine the reassociation width to be used in reassociate_bb.
9925 This takes into account how many parallel operations we
9926 can actually do of a given type, and also the latency.
9928 int add/sub 6/cycle
9929 mul 2/cycle
9930 vect add/sub/mul 2/cycle
9931 fp add/sub/mul 2/cycle
9932 dfp 1/cycle
9935 static int
9936 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9937 machine_mode mode)
9939 switch (rs6000_cpu)
9941 case PROCESSOR_POWER8:
9942 case PROCESSOR_POWER9:
9943 if (DECIMAL_FLOAT_MODE_P (mode))
9944 return 1;
9945 if (VECTOR_MODE_P (mode))
9946 return 4;
9947 if (INTEGRAL_MODE_P (mode))
9948 return opc == MULT_EXPR ? 4 : 6;
9949 if (FLOAT_MODE_P (mode))
9950 return 4;
9951 break;
9952 default:
9953 break;
9955 return 1;
9958 /* Change register usage conditional on target flags. */
9959 static void
9960 rs6000_conditional_register_usage (void)
9962 int i;
9964 if (TARGET_DEBUG_TARGET)
9965 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9967 /* Set MQ register fixed (already call_used) so that it will not be
9968 allocated. */
9969 fixed_regs[64] = 1;
9971 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9972 if (TARGET_64BIT)
9973 fixed_regs[13] = call_used_regs[13]
9974 = call_really_used_regs[13] = 1;
9976 /* Conditionally disable FPRs. */
9977 if (TARGET_SOFT_FLOAT)
9978 for (i = 32; i < 64; i++)
9979 fixed_regs[i] = call_used_regs[i]
9980 = call_really_used_regs[i] = 1;
9982 /* The TOC register is not killed across calls in a way that is
9983 visible to the compiler. */
9984 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9985 call_really_used_regs[2] = 0;
9987 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9988 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9990 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9991 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9992 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9993 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9995 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9996 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9997 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9998 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10000 if (TARGET_TOC && TARGET_MINIMAL_TOC)
10001 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10002 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10004 if (!TARGET_ALTIVEC && !TARGET_VSX)
10006 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
10007 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10008 call_really_used_regs[VRSAVE_REGNO] = 1;
10011 if (TARGET_ALTIVEC || TARGET_VSX)
10012 global_regs[VSCR_REGNO] = 1;
10014 if (TARGET_ALTIVEC_ABI)
10016 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
10017 call_used_regs[i] = call_really_used_regs[i] = 1;
10019 /* AIX reserves VR20:31 in non-extended ABI mode. */
10020 if (TARGET_XCOFF)
10021 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
10022 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10027 /* Output insns to set DEST equal to the constant SOURCE as a series of
10028 lis, ori and shl instructions and return TRUE. */
10030 bool
10031 rs6000_emit_set_const (rtx dest, rtx source)
10033 machine_mode mode = GET_MODE (dest);
10034 rtx temp, set;
10035 rtx_insn *insn;
10036 HOST_WIDE_INT c;
10038 gcc_checking_assert (CONST_INT_P (source));
10039 c = INTVAL (source);
10040 switch (mode)
10042 case E_QImode:
10043 case E_HImode:
10044 emit_insn (gen_rtx_SET (dest, source));
10045 return true;
10047 case E_SImode:
10048 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
10050 emit_insn (gen_rtx_SET (copy_rtx (temp),
10051 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
10052 emit_insn (gen_rtx_SET (dest,
10053 gen_rtx_IOR (SImode, copy_rtx (temp),
10054 GEN_INT (c & 0xffff))));
10055 break;
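/* Example of the SImode case above (illustrative): c = 0x12345678
   emits the equivalent of

       lis  temp, 0x1234        # temp = 0x12340000
       ori  dest, temp, 0x5678

   i.e. a SET of the high 16 bits followed by an IOR of the low
   16 bits.  */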
10057 case E_DImode:
10058 if (!TARGET_POWERPC64)
10060 rtx hi, lo;
10062 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
10063 DImode);
10064 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
10065 DImode);
10066 emit_move_insn (hi, GEN_INT (c >> 32));
10067 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
10068 emit_move_insn (lo, GEN_INT (c));
10070 else
10071 rs6000_emit_set_long_const (dest, c);
10072 break;
10074 default:
10075 gcc_unreachable ();
10078 insn = get_last_insn ();
10079 set = single_set (insn);
10080 if (! CONSTANT_P (SET_SRC (set)))
10081 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
10083 return true;
10086 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
10087 Output insns to set DEST equal to the constant C as a series of
10088 lis, ori and shl instructions. */
10090 static void
10091 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
10093 rtx temp;
10094 HOST_WIDE_INT ud1, ud2, ud3, ud4;
10096 ud1 = c & 0xffff;
10097 c = c >> 16;
10098 ud2 = c & 0xffff;
10099 c = c >> 16;
10100 ud3 = c & 0xffff;
10101 c = c >> 16;
10102 ud4 = c & 0xffff;
10104 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
10105 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
10106 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
10108 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
10109 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
10111 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10113 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10114 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10115 if (ud1 != 0)
10116 emit_move_insn (dest,
10117 gen_rtx_IOR (DImode, copy_rtx (temp),
10118 GEN_INT (ud1)));
10120 else if (ud3 == 0 && ud4 == 0)
10122 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10124 gcc_assert (ud2 & 0x8000);
10125 emit_move_insn (copy_rtx (temp),
10126 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10127 if (ud1 != 0)
10128 emit_move_insn (copy_rtx (temp),
10129 gen_rtx_IOR (DImode, copy_rtx (temp),
10130 GEN_INT (ud1)));
10131 emit_move_insn (dest,
10132 gen_rtx_ZERO_EXTEND (DImode,
10133 gen_lowpart (SImode,
10134 copy_rtx (temp))));
10136 else if ((ud4 == 0xffff && (ud3 & 0x8000))
10137 || (ud4 == 0 && ! (ud3 & 0x8000)))
10139 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10141 emit_move_insn (copy_rtx (temp),
10142 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
10143 if (ud2 != 0)
10144 emit_move_insn (copy_rtx (temp),
10145 gen_rtx_IOR (DImode, copy_rtx (temp),
10146 GEN_INT (ud2)));
10147 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10148 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10149 GEN_INT (16)));
10150 if (ud1 != 0)
10151 emit_move_insn (dest,
10152 gen_rtx_IOR (DImode, copy_rtx (temp),
10153 GEN_INT (ud1)));
10155 else
10157 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10159 emit_move_insn (copy_rtx (temp),
10160 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
10161 if (ud3 != 0)
10162 emit_move_insn (copy_rtx (temp),
10163 gen_rtx_IOR (DImode, copy_rtx (temp),
10164 GEN_INT (ud3)));
10166 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
10167 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10168 GEN_INT (32)));
10169 if (ud2 != 0)
10170 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10171 gen_rtx_IOR (DImode, copy_rtx (temp),
10172 GEN_INT (ud2 << 16)));
10173 if (ud1 != 0)
10174 emit_move_insn (dest,
10175 gen_rtx_IOR (DImode, copy_rtx (temp),
10176 GEN_INT (ud1)));
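/* Worked example of the final, general case above (illustrative):
   c = 0x1234567890ABCDEF splits into ud4 = 0x1234, ud3 = 0x5678,
   ud2 = 0x90AB, ud1 = 0xCDEF and emits the equivalent of

       lis  temp, 0x1234
       ori  temp, temp, 0x5678
       sldi temp, temp, 32
       oris temp, temp, 0x90AB
       ori  dest, temp, 0xCDEF

   the earlier special cases exist to trim this five-insn sequence
   when some of the ud halfwords are zero or sign extensions.  */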
10180 /* Helper for the following. Get rid of [r+r] memory refs
10181 in cases where they won't work (TImode, TFmode, TDmode, PTImode). */
10183 static void
10184 rs6000_eliminate_indexed_memrefs (rtx operands[2])
10186 if (GET_CODE (operands[0]) == MEM
10187 && GET_CODE (XEXP (operands[0], 0)) != REG
10188 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
10189 GET_MODE (operands[0]), false))
10190 operands[0]
10191 = replace_equiv_address (operands[0],
10192 copy_addr_to_reg (XEXP (operands[0], 0)));
10194 if (GET_CODE (operands[1]) == MEM
10195 && GET_CODE (XEXP (operands[1], 0)) != REG
10196 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
10197 GET_MODE (operands[1]), false))
10198 operands[1]
10199 = replace_equiv_address (operands[1],
10200 copy_addr_to_reg (XEXP (operands[1], 0)));
10203 /* Generate a vector of constants to permute MODE for a little-endian
10204 storage operation by swapping the two halves of a vector. */
10205 static rtvec
10206 rs6000_const_vec (machine_mode mode)
10208 int i, subparts;
10209 rtvec v;
10211 switch (mode)
10213 case E_V1TImode:
10214 subparts = 1;
10215 break;
10216 case E_V2DFmode:
10217 case E_V2DImode:
10218 subparts = 2;
10219 break;
10220 case E_V4SFmode:
10221 case E_V4SImode:
10222 subparts = 4;
10223 break;
10224 case E_V8HImode:
10225 subparts = 8;
10226 break;
10227 case E_V16QImode:
10228 subparts = 16;
10229 break;
10230 default:
10231 gcc_unreachable();
10234 v = rtvec_alloc (subparts);
10236 for (i = 0; i < subparts / 2; ++i)
10237 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10238 for (i = subparts / 2; i < subparts; ++i)
10239 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
10241 return v;
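/* Example (illustrative): for V4SImode this returns the selector
   { 2, 3, 0, 1 }, a permutation that exchanges the two 64-bit
   halves of the vector.  */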
10244 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
10245 store operation. */
10246 void
10247 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
10249 /* Scalar permutations are easier to express in integer modes than in
10250 floating-point modes, so cast them here. We use V1TImode instead
10251 of TImode to ensure that the values don't go through GPRs. */
10252 if (FLOAT128_VECTOR_P (mode))
10254 dest = gen_lowpart (V1TImode, dest);
10255 source = gen_lowpart (V1TImode, source);
10256 mode = V1TImode;
10259 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
10260 scalar. */
10261 if (mode == TImode || mode == V1TImode)
10262 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
10263 GEN_INT (64))));
10264 else
10266 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10267 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
10271 /* Emit a little-endian load from vector memory location SOURCE to VSX
10272 register DEST in mode MODE. The load is done with two permuting
10273 insns that represent an lxvd2x and an xxpermdi. */
10274 void
10275 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10277 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10278 V1TImode). */
10279 if (mode == TImode || mode == V1TImode)
10281 mode = V2DImode;
10282 dest = gen_lowpart (V2DImode, dest);
10283 source = adjust_address (source, V2DImode, 0);
10286 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10287 rs6000_emit_le_vsx_permute (tmp, source, mode);
10288 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10291 /* Emit a little-endian store to vector memory location DEST from VSX
10292 register SOURCE in mode MODE. The store is done with two permuting
10293 insns that represent an xxpermdi and an stxvd2x. */
10294 void
10295 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10297 /* This should never be called during or after LRA, because it does
10298 not re-permute the source register. It is intended only for use
10299 during expand. */
10300 gcc_assert (!lra_in_progress && !reload_completed);
10302 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10303 V1TImode). */
10304 if (mode == TImode || mode == V1TImode)
10306 mode = V2DImode;
10307 dest = adjust_address (dest, V2DImode, 0);
10308 source = gen_lowpart (V2DImode, source);
10311 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10312 rs6000_emit_le_vsx_permute (tmp, source, mode);
10313 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10316 /* Emit a sequence representing a little-endian VSX load or store,
10317 moving data from SOURCE to DEST in mode MODE. This is done
10318 separately from rs6000_emit_move to ensure it is called only
10319 during expand. LE VSX loads and stores introduced later are
10320 handled with a split. The expand-time RTL generation allows
10321 us to optimize away redundant pairs of register-permutes. */
10322 void
10323 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10325 gcc_assert (!BYTES_BIG_ENDIAN
10326 && VECTOR_MEM_VSX_P (mode)
10327 && !TARGET_P9_VECTOR
10328 && !gpr_or_gpr_p (dest, source)
10329 && (MEM_P (source) ^ MEM_P (dest)));
10331 if (MEM_P (source))
10333 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10334 rs6000_emit_le_vsx_load (dest, source, mode);
10336 else
10338 if (!REG_P (source))
10339 source = force_reg (mode, source);
10340 rs6000_emit_le_vsx_store (dest, source, mode);
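/* Sketch of the load case (illustrative): a little-endian V4SImode
   load is emitted as two permuting operations, corresponding to

       lxvd2x   vtmp, 0, rADDR         # doubleword-swapped load
       xxpermdi vDEST, vtmp, vtmp, 2   # swap the halves back

   and a store is the mirror image: xxpermdi then stxvd2x.  */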
10344 /* Return whether an SFmode or SImode move can be done without converting one
10345 mode to another. This arises when we have:
10347 (SUBREG:SF (REG:SI ...))
10348 (SUBREG:SI (REG:SF ...))
10350 and one of the values is in a floating point/vector register, where SFmode
10351 scalars are stored in DFmode format. */
10353 bool
10354 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10356 if (TARGET_ALLOW_SF_SUBREG)
10357 return true;
10359 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10360 return true;
10362 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10363 return true;
10365 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
10366 if (SUBREG_P (dest))
10368 rtx dest_subreg = SUBREG_REG (dest);
10369 rtx src_subreg = SUBREG_REG (src);
10370 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10373 return false;
10377 /* Helper function to change moves with:
10379 (SUBREG:SF (REG:SI)) and
10380 (SUBREG:SI (REG:SF))
10382 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10383 values are stored as DFmode values in the VSX registers. We need to convert
10384 the bits before we can use a direct move or operate on the bits in the
10385 vector register as an integer type.
10387 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
10389 static bool
10390 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10392 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
10393 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10394 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10396 rtx inner_source = SUBREG_REG (source);
10397 machine_mode inner_mode = GET_MODE (inner_source);
10399 if (mode == SImode && inner_mode == SFmode)
10401 emit_insn (gen_movsi_from_sf (dest, inner_source));
10402 return true;
10405 if (mode == SFmode && inner_mode == SImode)
10407 emit_insn (gen_movsf_from_si (dest, inner_source));
10408 return true;
10412 return false;
10415 /* Emit a move from SOURCE to DEST in mode MODE. */
10416 void
10417 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10419 rtx operands[2];
10420 operands[0] = dest;
10421 operands[1] = source;
10423 if (TARGET_DEBUG_ADDR)
10425 fprintf (stderr,
10426 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
10427 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10428 GET_MODE_NAME (mode),
10429 lra_in_progress,
10430 reload_completed,
10431 can_create_pseudo_p ());
10432 debug_rtx (dest);
10433 fprintf (stderr, "source:\n");
10434 debug_rtx (source);
10437 /* Sanity check: a CONST_WIDE_INT should never appear for a mode that fits in a single HOST_WIDE_INT. */
10438 if (CONST_WIDE_INT_P (operands[1])
10439 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10441 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10442 gcc_unreachable ();
10445 /* See if we need to special case SImode/SFmode SUBREG moves. */
10446 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10447 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10448 return;
10450 /* Check if GCC is setting up a block move that will end up using FP
10451 registers as temporaries. We must make sure this is acceptable. */
10452 if (GET_CODE (operands[0]) == MEM
10453 && GET_CODE (operands[1]) == MEM
10454 && mode == DImode
10455 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
10456 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
10457 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
10458 ? 32 : MEM_ALIGN (operands[0])))
10459 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
10460 ? 32
10461 : MEM_ALIGN (operands[1]))))
10462 && ! MEM_VOLATILE_P (operands [0])
10463 && ! MEM_VOLATILE_P (operands [1]))
10465 emit_move_insn (adjust_address (operands[0], SImode, 0),
10466 adjust_address (operands[1], SImode, 0));
10467 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10468 adjust_address (copy_rtx (operands[1]), SImode, 4));
10469 return;
10472 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10473 && !gpc_reg_operand (operands[1], mode))
10474 operands[1] = force_reg (mode, operands[1]);
10476 /* Recognize the case where operand[1] is a reference to thread-local
10477 data and load its address to a register. */
10478 if (tls_referenced_p (operands[1]))
10480 enum tls_model model;
10481 rtx tmp = operands[1];
10482 rtx addend = NULL;
10484 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10486 addend = XEXP (XEXP (tmp, 0), 1);
10487 tmp = XEXP (XEXP (tmp, 0), 0);
10490 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10491 model = SYMBOL_REF_TLS_MODEL (tmp);
10492 gcc_assert (model != 0);
10494 tmp = rs6000_legitimize_tls_address (tmp, model);
10495 if (addend)
10497 tmp = gen_rtx_PLUS (mode, tmp, addend);
10498 tmp = force_operand (tmp, operands[0]);
10500 operands[1] = tmp;
10503 /* 128-bit constant floating-point values on Darwin should really be loaded
10504 as two parts. However, this premature splitting is a problem when DFmode
10505 values can go into Altivec registers. */
10506 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10507 && GET_CODE (operands[1]) == CONST_DOUBLE)
10509 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10510 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10511 DFmode);
10512 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10513 GET_MODE_SIZE (DFmode)),
10514 simplify_gen_subreg (DFmode, operands[1], mode,
10515 GET_MODE_SIZE (DFmode)),
10516 DFmode);
10517 return;
10520 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10521 p1:SD) if p1 is not of floating point class and p0 is spilled as
10522 we can have no analogous movsd_store for this. */
10523 if (lra_in_progress && mode == DDmode
10524 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10525 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10526 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10527 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10529 enum reg_class cl;
10530 int regno = REGNO (SUBREG_REG (operands[1]));
10532 if (regno >= FIRST_PSEUDO_REGISTER)
10534 cl = reg_preferred_class (regno);
10535 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10537 if (regno >= 0 && ! FP_REGNO_P (regno))
10539 mode = SDmode;
10540 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10541 operands[1] = SUBREG_REG (operands[1]);
10544 if (lra_in_progress
10545 && mode == SDmode
10546 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10547 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10548 && (REG_P (operands[1])
10549 || (GET_CODE (operands[1]) == SUBREG
10550 && REG_P (SUBREG_REG (operands[1])))))
10552 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10553 ? SUBREG_REG (operands[1]) : operands[1]);
10554 enum reg_class cl;
10556 if (regno >= FIRST_PSEUDO_REGISTER)
10558 cl = reg_preferred_class (regno);
10559 gcc_assert (cl != NO_REGS);
10560 regno = ira_class_hard_regs[cl][0];
10562 if (FP_REGNO_P (regno))
10564 if (GET_MODE (operands[0]) != DDmode)
10565 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10566 emit_insn (gen_movsd_store (operands[0], operands[1]));
10568 else if (INT_REGNO_P (regno))
10569 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10570 else
10571 gcc_unreachable();
10572 return;
10574 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10575 p:DD)) if p0 is not of floating point class and p1 is spilled as
10576 we can have no analogous movsd_load for this. */
10577 if (lra_in_progress && mode == DDmode
10578 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10579 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10580 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10581 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10583 enum reg_class cl;
10584 int regno = REGNO (SUBREG_REG (operands[0]));
10586 if (regno >= FIRST_PSEUDO_REGISTER)
10588 cl = reg_preferred_class (regno);
10589 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10591 if (regno >= 0 && ! FP_REGNO_P (regno))
10593 mode = SDmode;
10594 operands[0] = SUBREG_REG (operands[0]);
10595 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10598 if (lra_in_progress
10599 && mode == SDmode
10600 && (REG_P (operands[0])
10601 || (GET_CODE (operands[0]) == SUBREG
10602 && REG_P (SUBREG_REG (operands[0]))))
10603 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10604 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10606 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10607 ? SUBREG_REG (operands[0]) : operands[0]);
10608 enum reg_class cl;
10610 if (regno >= FIRST_PSEUDO_REGISTER)
10612 cl = reg_preferred_class (regno);
10613 gcc_assert (cl != NO_REGS);
10614 regno = ira_class_hard_regs[cl][0];
10616 if (FP_REGNO_P (regno))
10618 if (GET_MODE (operands[1]) != DDmode)
10619 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10620 emit_insn (gen_movsd_load (operands[0], operands[1]));
10622 else if (INT_REGNO_P (regno))
10623 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10624 else
10625 gcc_unreachable();
10626 return;
10629 /* FIXME: In the long term, this switch statement should go away
10630 and be replaced by a sequence of tests based on things like
10631 mode == Pmode. */
10632 switch (mode)
10634 case E_HImode:
10635 case E_QImode:
10636 if (CONSTANT_P (operands[1])
10637 && GET_CODE (operands[1]) != CONST_INT)
10638 operands[1] = force_const_mem (mode, operands[1]);
10639 break;
10641 case E_TFmode:
10642 case E_TDmode:
10643 case E_IFmode:
10644 case E_KFmode:
10645 if (FLOAT128_2REG_P (mode))
10646 rs6000_eliminate_indexed_memrefs (operands);
10647 /* fall through */
10649 case E_DFmode:
10650 case E_DDmode:
10651 case E_SFmode:
10652 case E_SDmode:
10653 if (CONSTANT_P (operands[1])
10654 && ! easy_fp_constant (operands[1], mode))
10655 operands[1] = force_const_mem (mode, operands[1]);
10656 break;
10658 case E_V16QImode:
10659 case E_V8HImode:
10660 case E_V4SFmode:
10661 case E_V4SImode:
10662 case E_V2SFmode:
10663 case E_V2SImode:
10664 case E_V2DFmode:
10665 case E_V2DImode:
10666 case E_V1TImode:
10667 if (CONSTANT_P (operands[1])
10668 && !easy_vector_constant (operands[1], mode))
10669 operands[1] = force_const_mem (mode, operands[1]);
10670 break;
10672 case E_SImode:
10673 case E_DImode:
10674 /* Use the default pattern for the address of ELF small data. */
10675 if (TARGET_ELF
10676 && mode == Pmode
10677 && DEFAULT_ABI == ABI_V4
10678 && (GET_CODE (operands[1]) == SYMBOL_REF
10679 || GET_CODE (operands[1]) == CONST)
10680 && small_data_operand (operands[1], mode))
10682 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10683 return;
10686 if (DEFAULT_ABI == ABI_V4
10687 && mode == Pmode && mode == SImode
10688 && flag_pic == 1 && got_operand (operands[1], mode))
10690 emit_insn (gen_movsi_got (operands[0], operands[1]));
10691 return;
10694 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10695 && TARGET_NO_TOC
10696 && ! flag_pic
10697 && mode == Pmode
10698 && CONSTANT_P (operands[1])
10699 && GET_CODE (operands[1]) != HIGH
10700 && GET_CODE (operands[1]) != CONST_INT)
10702 rtx target = (!can_create_pseudo_p ()
10703 ? operands[0]
10704 : gen_reg_rtx (mode));
10706 /* If this is a function address on -mcall-aixdesc,
10707 convert it to the address of the descriptor. */
10708 if (DEFAULT_ABI == ABI_AIX
10709 && GET_CODE (operands[1]) == SYMBOL_REF
10710 && XSTR (operands[1], 0)[0] == '.')
10712 const char *name = XSTR (operands[1], 0);
10713 rtx new_ref;
10714 while (*name == '.')
10715 name++;
10716 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10717 CONSTANT_POOL_ADDRESS_P (new_ref)
10718 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10719 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10720 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10721 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10722 operands[1] = new_ref;
10725 if (DEFAULT_ABI == ABI_DARWIN)
10727 #if TARGET_MACHO
10728 if (MACHO_DYNAMIC_NO_PIC_P)
10730 /* Take care of any required data indirection. */
10731 operands[1] = rs6000_machopic_legitimize_pic_address (
10732 operands[1], mode, operands[0]);
10733 if (operands[0] != operands[1])
10734 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10735 return;
10737 #endif
10738 emit_insn (gen_macho_high (target, operands[1]));
10739 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10740 return;
10743 emit_insn (gen_elf_high (target, operands[1]));
10744 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10745 return;
10748 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10749 and we have put it in the TOC, we just need to make a TOC-relative
10750 reference to it. */
10751 if (TARGET_TOC
10752 && GET_CODE (operands[1]) == SYMBOL_REF
10753 && use_toc_relative_ref (operands[1], mode))
10754 operands[1] = create_TOC_reference (operands[1], operands[0]);
10755 else if (mode == Pmode
10756 && CONSTANT_P (operands[1])
10757 && GET_CODE (operands[1]) != HIGH
10758 && ((GET_CODE (operands[1]) != CONST_INT
10759 && ! easy_fp_constant (operands[1], mode))
10760 || (GET_CODE (operands[1]) == CONST_INT
10761 && (num_insns_constant (operands[1], mode)
10762 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10763 || (GET_CODE (operands[0]) == REG
10764 && FP_REGNO_P (REGNO (operands[0]))))
10765 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10766 && (TARGET_CMODEL == CMODEL_SMALL
10767 || can_create_pseudo_p ()
10768 || (REG_P (operands[0])
10769 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10772 #if TARGET_MACHO
10773 /* Darwin uses a special PIC legitimizer. */
10774 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10776 operands[1] =
10777 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10778 operands[0]);
10779 if (operands[0] != operands[1])
10780 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10781 return;
10783 #endif
10785 /* If we are to limit the number of things we put in the TOC and
10786 this is a symbol plus a constant we can add in one insn,
10787 just put the symbol in the TOC and add the constant. */
10788 if (GET_CODE (operands[1]) == CONST
10789 && TARGET_NO_SUM_IN_TOC
10790 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10791 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10792 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10793 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10794 && ! side_effects_p (operands[0]))
10796 rtx sym =
10797 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10798 rtx other = XEXP (XEXP (operands[1], 0), 1);
10800 sym = force_reg (mode, sym);
10801 emit_insn (gen_add3_insn (operands[0], sym, other));
10802 return;
10805 operands[1] = force_const_mem (mode, operands[1]);
10807 if (TARGET_TOC
10808 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10809 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10811 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10812 operands[0]);
10813 operands[1] = gen_const_mem (mode, tocref);
10814 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10817 break;
10819 case E_TImode:
10820 if (!VECTOR_MEM_VSX_P (TImode))
10821 rs6000_eliminate_indexed_memrefs (operands);
10822 break;
10824 case E_PTImode:
10825 rs6000_eliminate_indexed_memrefs (operands);
10826 break;
10828 default:
10829 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10832 /* Above, we may have called force_const_mem which may have returned
10833 an invalid address. If we can, fix this up; otherwise, reload will
10834 have to deal with it. */
10835 if (GET_CODE (operands[1]) == MEM)
10836 operands[1] = validize_mem (operands[1]);
10838 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10841 /* Nonzero if we can use a floating-point register to pass this arg. */
10842 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10843 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10844 && (CUM)->fregno <= FP_ARG_MAX_REG \
10845 && TARGET_HARD_FLOAT)
10847 /* Nonzero if we can use an AltiVec register to pass this arg. */
10848 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10849 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10850 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10851 && TARGET_ALTIVEC_ABI \
10852 && (NAMED))
10854 /* Walk down the type tree of TYPE counting consecutive base elements.
10855 If *MODEP is VOIDmode, then set it to the first valid floating point
10856 or vector type. If a non-floating point or vector type is found, or
10857 if a floating point or vector type that doesn't match a non-VOIDmode
10858 *MODEP is found, then return -1, otherwise return the count in the
10859 sub-tree. */
10861 static int
10862 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10864 machine_mode mode;
10865 HOST_WIDE_INT size;
10867 switch (TREE_CODE (type))
10869 case REAL_TYPE:
10870 mode = TYPE_MODE (type);
10871 if (!SCALAR_FLOAT_MODE_P (mode))
10872 return -1;
10874 if (*modep == VOIDmode)
10875 *modep = mode;
10877 if (*modep == mode)
10878 return 1;
10880 break;
10882 case COMPLEX_TYPE:
10883 mode = TYPE_MODE (TREE_TYPE (type));
10884 if (!SCALAR_FLOAT_MODE_P (mode))
10885 return -1;
10887 if (*modep == VOIDmode)
10888 *modep = mode;
10890 if (*modep == mode)
10891 return 2;
10893 break;
10895 case VECTOR_TYPE:
10896 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10897 return -1;
10899 /* Use V4SImode as representative of all 128-bit vector types. */
10900 size = int_size_in_bytes (type);
10901 switch (size)
10903 case 16:
10904 mode = V4SImode;
10905 break;
10906 default:
10907 return -1;
10910 if (*modep == VOIDmode)
10911 *modep = mode;
10913 /* Vector modes are considered to be opaque: two vectors are
10914 equivalent for the purposes of being homogeneous aggregates
10915 if they are the same size. */
10916 if (*modep == mode)
10917 return 1;
10919 break;
10921 case ARRAY_TYPE:
10923 int count;
10924 tree index = TYPE_DOMAIN (type);
10926 /* Can't handle incomplete types nor sizes that are not
10927 fixed. */
10928 if (!COMPLETE_TYPE_P (type)
10929 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10930 return -1;
10932 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10933 if (count == -1
10934 || !index
10935 || !TYPE_MAX_VALUE (index)
10936 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10937 || !TYPE_MIN_VALUE (index)
10938 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10939 || count < 0)
10940 return -1;
10942 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10943 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10945 /* There must be no padding. */
10946 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
10947 return -1;
10949 return count;
10952 case RECORD_TYPE:
10954 int count = 0;
10955 int sub_count;
10956 tree field;
10958 /* Can't handle incomplete types nor sizes that are not
10959 fixed. */
10960 if (!COMPLETE_TYPE_P (type)
10961 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10962 return -1;
10964 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10966 if (TREE_CODE (field) != FIELD_DECL)
10967 continue;
10969 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10970 if (sub_count < 0)
10971 return -1;
10972 count += sub_count;
10975 /* There must be no padding. */
10976 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
10977 return -1;
10979 return count;
10982 case UNION_TYPE:
10983 case QUAL_UNION_TYPE:
10985 /* These aren't very interesting except in a degenerate case. */
10986 int count = 0;
10987 int sub_count;
10988 tree field;
10990 /* Can't handle incomplete types nor sizes that are not
10991 fixed. */
10992 if (!COMPLETE_TYPE_P (type)
10993 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10994 return -1;
10996 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10998 if (TREE_CODE (field) != FIELD_DECL)
10999 continue;
11001 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11002 if (sub_count < 0)
11003 return -1;
11004 count = count > sub_count ? count : sub_count;
11007 /* There must be no padding. */
11008 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11009 return -1;
11011 return count;
11014 default:
11015 break;
11018 return -1;
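/* As an illustration of the cases above, consider

     struct hfa { double x; double y; _Complex double z; };

   The two REAL_TYPE fields contribute one element each and the
   COMPLEX_TYPE field contributes two, all in DFmode, so the RECORD_TYPE
   case returns 4 with *MODEP == DFmode (32 bytes, no padding).  A struct
   mixing double and int fields would instead return -1, since the int
   field fails the floating point/vector test.  */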
11021 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
11022 float or vector aggregate that shall be passed in FP/vector registers
11023 according to the ELFv2 ABI, return the homogeneous element mode in
11024 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
11026 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
11028 static bool
11029 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
11030 machine_mode *elt_mode,
11031 int *n_elts)
11033 /* Note that we do not accept complex types at the top level as
11034 homogeneous aggregates; these types are handled via the
11035 targetm.calls.split_complex_arg mechanism. Complex types
11036 can be elements of homogeneous aggregates, however. */
11037 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
11039 machine_mode field_mode = VOIDmode;
11040 int field_count = rs6000_aggregate_candidate (type, &field_mode);
11042 if (field_count > 0)
11044 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
11045 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
11047 /* The ELFv2 ABI allows homogeneous aggregates to occupy
11048 up to AGGR_ARG_NUM_REG registers. */
11049 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
11051 if (elt_mode)
11052 *elt_mode = field_mode;
11053 if (n_elts)
11054 *n_elts = field_count;
11055 return true;
11060 if (elt_mode)
11061 *elt_mode = mode;
11062 if (n_elts)
11063 *n_elts = 1;
11064 return false;
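/* For example, assuming AGGR_ARG_NUM_REG is 8 (its ELFv2 value), a
   homogeneous aggregate such as

     struct v { double d[8]; };

   yields field_mode == DFmode and field_count == 8; each DFmode element
   needs one register, and 8 * 1 <= 8, so *ELT_MODE and *N_ELTS are set
   and TRUE is returned.  'double d[9]' would exceed the register budget
   and be passed like any other aggregate via the FALSE path.  */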
11067 /* Return a nonzero value to say to return the function value in
11068 memory, just as large structures are always returned. TYPE will be
11069 the data type of the value, and FNTYPE will be the type of the
11070 function doing the returning, or @code{NULL} for libcalls.
11072 The AIX ABI for the RS/6000 specifies that all structures are
11073 returned in memory. The Darwin ABI does the same.
11075 For the Darwin 64 Bit ABI, a function result can be returned in
11076 registers or in memory, depending on the size of the return data
11077 type. If it is returned in registers, the value occupies the same
11078 registers as it would if it were the first and only function
11079 argument. Otherwise, the function places its result in memory at
11080 the location pointed to by GPR3.
11082 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
11083 but a draft put them in memory, and GCC used to implement the draft
11084 instead of the final standard. Therefore, aix_struct_return
11085 controls this instead of DEFAULT_ABI; V.4 targets needing backward
11086 compatibility can change DRAFT_V4_STRUCT_RET to override the
11087 default, and -m switches get the final word. See
11088 rs6000_option_override_internal for more details.
11090 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
11091 long double support is enabled. These values are returned in memory.
11093 int_size_in_bytes returns -1 for variable size objects, which go in
11094 memory always. The cast to unsigned makes -1 > 8. */
11096 static bool
11097 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
11099 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
11100 if (TARGET_MACHO
11101 && rs6000_darwin64_abi
11102 && TREE_CODE (type) == RECORD_TYPE
11103 && int_size_in_bytes (type) > 0)
11105 CUMULATIVE_ARGS valcum;
11106 rtx valret;
11108 valcum.words = 0;
11109 valcum.fregno = FP_ARG_MIN_REG;
11110 valcum.vregno = ALTIVEC_ARG_MIN_REG;
11111 /* Do a trial code generation as if this were going to be passed
11112 as an argument; if any part goes in memory, we return NULL. */
11113 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
11114 if (valret)
11115 return false;
11116 /* Otherwise fall through to more conventional ABI rules. */
11119 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
11120 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
11121 NULL, NULL))
11122 return false;
11124 /* The ELFv2 ABI returns aggregates up to 16B in registers */
11125 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
11126 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
11127 return false;
11129 if (AGGREGATE_TYPE_P (type)
11130 && (aix_struct_return
11131 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
11132 return true;
11134 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11135 modes only exist for GCC vector types if -maltivec. */
11136 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
11137 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11138 return false;
11140 /* Return synthetic vectors in memory. */
11141 if (TREE_CODE (type) == VECTOR_TYPE
11142 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11144 static bool warned_for_return_big_vectors = false;
11145 if (!warned_for_return_big_vectors)
11147 warning (OPT_Wpsabi, "GCC vector returned by reference: "
11148 "non-standard ABI extension with no compatibility "
11149 "guarantee");
11150 warned_for_return_big_vectors = true;
11152 return true;
11155 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11156 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11157 return true;
11159 return false;
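/* A few illustrative cases of the rules above under ELFv2:

     struct { double a, b; }     homogeneous aggregate, returned in FPRs
     struct { int a[4]; }        16 bytes, returned in GPRs
     struct { char c[17]; }      over 16 bytes, returned in memory

   Under the AIX ABI all three go to memory, since aix_struct_return
   is set there.  */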
11162 /* Specify whether values returned in registers should be at the most
11163 significant end of a register. We want aggregates returned by
11164 value to match the way aggregates are passed to functions. */
11166 static bool
11167 rs6000_return_in_msb (const_tree valtype)
11169 return (DEFAULT_ABI == ABI_ELFv2
11170 && BYTES_BIG_ENDIAN
11171 && AGGREGATE_TYPE_P (valtype)
11172 && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
11175 #ifdef HAVE_AS_GNU_ATTRIBUTE
11176 /* Return TRUE if a call to function FNDECL may be one that
11177 potentially affects the function calling ABI of the object file. */
11179 static bool
11180 call_ABI_of_interest (tree fndecl)
11182 if (rs6000_gnu_attr && symtab->state == EXPANSION)
11184 struct cgraph_node *c_node;
11186 /* Libcalls are always interesting. */
11187 if (fndecl == NULL_TREE)
11188 return true;
11190 /* Any call to an external function is interesting. */
11191 if (DECL_EXTERNAL (fndecl))
11192 return true;
11194 /* Interesting functions that we are emitting in this object file. */
11195 c_node = cgraph_node::get (fndecl);
11196 c_node = c_node->ultimate_alias_target ();
11197 return !c_node->only_called_directly_p ();
11199 return false;
11201 #endif
11203 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11204 for a call to a function whose data type is FNTYPE.
11205 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
11207 For incoming args we set the number of arguments in the prototype large
11208 so we never return a PARALLEL. */
11210 void
11211 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
11212 rtx libname ATTRIBUTE_UNUSED, int incoming,
11213 int libcall, int n_named_args,
11214 tree fndecl ATTRIBUTE_UNUSED,
11215 machine_mode return_mode ATTRIBUTE_UNUSED)
11217 static CUMULATIVE_ARGS zero_cumulative;
11219 *cum = zero_cumulative;
11220 cum->words = 0;
11221 cum->fregno = FP_ARG_MIN_REG;
11222 cum->vregno = ALTIVEC_ARG_MIN_REG;
11223 cum->prototype = (fntype && prototype_p (fntype));
11224 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
11225 ? CALL_LIBCALL : CALL_NORMAL);
11226 cum->sysv_gregno = GP_ARG_MIN_REG;
11227 cum->stdarg = stdarg_p (fntype);
11228 cum->libcall = libcall;
11230 cum->nargs_prototype = 0;
11231 if (incoming || cum->prototype)
11232 cum->nargs_prototype = n_named_args;
11234 /* Check for a longcall attribute. */
11235 if ((!fntype && rs6000_default_long_calls)
11236 || (fntype
11237 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
11238 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
11239 cum->call_cookie |= CALL_LONG;
11241 if (TARGET_DEBUG_ARG)
11243 fprintf (stderr, "\ninit_cumulative_args:");
11244 if (fntype)
11246 tree ret_type = TREE_TYPE (fntype);
11247 fprintf (stderr, " ret code = %s,",
11248 get_tree_code_name (TREE_CODE (ret_type)));
11251 if (cum->call_cookie & CALL_LONG)
11252 fprintf (stderr, " longcall,");
11254 fprintf (stderr, " proto = %d, nargs = %d\n",
11255 cum->prototype, cum->nargs_prototype);
11258 #ifdef HAVE_AS_GNU_ATTRIBUTE
11259 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
11261 cum->escapes = call_ABI_of_interest (fndecl);
11262 if (cum->escapes)
11264 tree return_type;
11266 if (fntype)
11268 return_type = TREE_TYPE (fntype);
11269 return_mode = TYPE_MODE (return_type);
11271 else
11272 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
11274 if (return_type != NULL)
11276 if (TREE_CODE (return_type) == RECORD_TYPE
11277 && TYPE_TRANSPARENT_AGGR (return_type))
11279 return_type = TREE_TYPE (first_field (return_type));
11280 return_mode = TYPE_MODE (return_type);
11282 if (AGGREGATE_TYPE_P (return_type)
11283 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
11284 <= 8))
11285 rs6000_returns_struct = true;
11287 if (SCALAR_FLOAT_MODE_P (return_mode))
11289 rs6000_passes_float = true;
11290 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11291 && (FLOAT128_IBM_P (return_mode)
11292 || FLOAT128_IEEE_P (return_mode)
11293 || (return_type != NULL
11294 && (TYPE_MAIN_VARIANT (return_type)
11295 == long_double_type_node))))
11296 rs6000_passes_long_double = true;
11298 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
11299 || PAIRED_VECTOR_MODE (return_mode))
11300 rs6000_passes_vector = true;
11303 #endif
11305 if (fntype
11306 && !TARGET_ALTIVEC
11307 && TARGET_ALTIVEC_ABI
11308 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
11310 error ("cannot return value in vector register because"
11311 " altivec instructions are disabled, use %qs"
11312 " to enable them", "-maltivec");
11316 /* The mode the ABI uses for a word. This is not the same as word_mode
11317 for -m32 -mpowerpc64. This is used to implement various target hooks. */
11319 static scalar_int_mode
11320 rs6000_abi_word_mode (void)
11322 return TARGET_32BIT ? SImode : DImode;
11325 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
11326 static char *
11327 rs6000_offload_options (void)
11329 if (TARGET_64BIT)
11330 return xstrdup ("-foffload-abi=lp64");
11331 else
11332 return xstrdup ("-foffload-abi=ilp32");
11335 /* On rs6000, function arguments are promoted, as are function return
11336 values. */
11338 static machine_mode
11339 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
11340 machine_mode mode,
11341 int *punsignedp ATTRIBUTE_UNUSED,
11342 const_tree, int)
11344 PROMOTE_MODE (mode, *punsignedp, type);
11346 return mode;
11349 /* Return true if TYPE must be passed on the stack and not in registers. */
11351 static bool
11352 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
11354 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
11355 return must_pass_in_stack_var_size (mode, type);
11356 else
11357 return must_pass_in_stack_var_size_or_pad (mode, type);
11360 static inline bool
11361 is_complex_IBM_long_double (machine_mode mode)
11363 return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
11366 /* Whether ABI_V4 passes MODE args to a function in floating point
11367 registers. */
11369 static bool
11370 abi_v4_pass_in_fpr (machine_mode mode)
11372 if (!TARGET_HARD_FLOAT)
11373 return false;
11374 if (TARGET_SINGLE_FLOAT && mode == SFmode)
11375 return true;
11376 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
11377 return true;
11378 /* ABI_V4 passes complex IBM long double in 8 gprs.
11379 Stupid, but we can't change the ABI now. */
11380 if (is_complex_IBM_long_double (mode))
11381 return false;
11382 if (FLOAT128_2REG_P (mode))
11383 return true;
11384 if (DECIMAL_FLOAT_MODE_P (mode))
11385 return true;
11386 return false;
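/* Illustration of the cases above, assuming -mabi=v4 with hard float and
   both single- and double-float enabled:

     float                   FPR
     double                  FPR
     long double             FPR pair when IBM extended (FLOAT128_2REG_P)
     _Decimal64              FPR (DECIMAL_FLOAT_MODE_P)
     _Complex long double    GPRs (eight of them), per the comment above  */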
11389 /* If defined, a C expression which determines whether, and in which
11390 direction, to pad out an argument with extra space. The value
11391 should be of type `enum direction': either `upward' to pad above
11392 the argument, `downward' to pad below, or `none' to inhibit
11393 padding.
11395 For the AIX ABI structs are always stored left shifted in their
11396 argument slot. */
11398 enum direction
11399 function_arg_padding (machine_mode mode, const_tree type)
11401 #ifndef AGGREGATE_PADDING_FIXED
11402 #define AGGREGATE_PADDING_FIXED 0
11403 #endif
11404 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11405 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11406 #endif
11408 if (!AGGREGATE_PADDING_FIXED)
11410 /* GCC used to pass structures of the same size as integer types as
11411 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
11412 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11413 passed padded downward, except that -mstrict-align further
11414 muddied the water in that multi-component structures of 2 and 4
11415 bytes in size were passed padded upward.
11417 The following arranges for best compatibility with previous
11418 versions of gcc, but removes the -mstrict-align dependency. */
11419 if (BYTES_BIG_ENDIAN)
11421 HOST_WIDE_INT size = 0;
11423 if (mode == BLKmode)
11425 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11426 size = int_size_in_bytes (type);
11428 else
11429 size = GET_MODE_SIZE (mode);
11431 if (size == 1 || size == 2 || size == 4)
11432 return downward;
11434 return upward;
11437 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11439 if (type != 0 && AGGREGATE_TYPE_P (type))
11440 return upward;
11443 /* Fall back to the default. */
11444 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
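/* Examples of the big-endian rule above:

     struct s2 { char a, b; };      size 2  ->  downward
     struct s3 { char a, b, c; };   size 3  ->  upward
     struct s8 { int a, b; };       size 8  ->  upward

   matching how older GCC passed integer-sized structures as if they
   were integers.  */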
11447 /* If defined, a C expression that gives the alignment boundary, in bits,
11448 of an argument with the specified mode and type. If it is not defined,
11449 PARM_BOUNDARY is used for all arguments.
11451 V.4 wants long longs and doubles to be double word aligned. Just
11452 testing the mode size is a boneheaded way to do this as it means
11453 that other types such as complex int are also double word aligned.
11454 However, we're stuck with this because changing the ABI might break
11455 existing library interfaces.
11457 Quadword align Altivec/VSX vectors.
11458 Quadword align large synthetic vector types. */
11460 static unsigned int
11461 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11463 machine_mode elt_mode;
11464 int n_elts;
11466 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11468 if (DEFAULT_ABI == ABI_V4
11469 && (GET_MODE_SIZE (mode) == 8
11470 || (TARGET_HARD_FLOAT
11471 && !is_complex_IBM_long_double (mode)
11472 && FLOAT128_2REG_P (mode))))
11473 return 64;
11474 else if (FLOAT128_VECTOR_P (mode))
11475 return 128;
11476 else if (PAIRED_VECTOR_MODE (mode)
11477 || (type && TREE_CODE (type) == VECTOR_TYPE
11478 && int_size_in_bytes (type) >= 8
11479 && int_size_in_bytes (type) < 16))
11480 return 64;
11481 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11482 || (type && TREE_CODE (type) == VECTOR_TYPE
11483 && int_size_in_bytes (type) >= 16))
11484 return 128;
11486 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11487 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11488 -mcompat-align-parm is used. */
11489 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11490 || DEFAULT_ABI == ABI_ELFv2)
11491 && type && TYPE_ALIGN (type) > 64)
11493 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11494 or homogeneous float/vector aggregates here. We already handled
11495 vector aggregates above, but still need to check for float here. */
11496 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11497 && !SCALAR_FLOAT_MODE_P (elt_mode));
11499 /* We used to check for BLKmode instead of the above aggregate type
11500 check. Warn when this results in any difference to the ABI. */
11501 if (aggregate_p != (mode == BLKmode))
11503 static bool warned;
11504 if (!warned && warn_psabi)
11506 warned = true;
11507 inform (input_location,
11508 "the ABI of passing aggregates with %d-byte alignment"
11509 " has changed in GCC 5",
11510 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11514 if (aggregate_p)
11515 return 128;
11518 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11519 implement the "aggregate type" check as a BLKmode check here; this
11520 means certain aggregate types are in fact not aligned. */
11521 if (TARGET_MACHO && rs6000_darwin64_abi
11522 && mode == BLKmode
11523 && type && TYPE_ALIGN (type) > 64)
11524 return 128;
11526 return PARM_BOUNDARY;
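/* Illustrative results of the rules above:

     double under ABI_V4                        64  (8-byte mode rule)
     IEEE 128-bit float (FLOAT128_VECTOR_P)     128
     vector int                                 128 (Altivec/VSX)
     ELFv2 struct { vector int v; int i; }      128 (TYPE_ALIGN > 64)
     plain int or long                          PARM_BOUNDARY  */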
11529 /* The offset in words to the start of the parameter save area. */
11531 static unsigned int
11532 rs6000_parm_offset (void)
11534 return (DEFAULT_ABI == ABI_V4 ? 2
11535 : DEFAULT_ABI == ABI_ELFv2 ? 4
11536 : 6);
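/* In bytes this works out to 8 for ABI_V4 (two 32-bit words: back chain
   and LR save), 32 for ELFv2 (four doublewords) and 48 for 64-bit AIX
   (six doublewords) of fixed stack-frame header preceding the parameter
   save area.  */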
11539 /* For a function parm of MODE and TYPE, return the starting word in
11540 the parameter area. NWORDS of the parameter area are already used. */
11542 static unsigned int
11543 rs6000_parm_start (machine_mode mode, const_tree type,
11544 unsigned int nwords)
11546 unsigned int align;
11548 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11549 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11552 /* Compute the size (in words) of a function argument. */
11554 static unsigned long
11555 rs6000_arg_size (machine_mode mode, const_tree type)
11557 unsigned long size;
11559 if (mode != BLKmode)
11560 size = GET_MODE_SIZE (mode);
11561 else
11562 size = int_size_in_bytes (type);
11564 if (TARGET_32BIT)
11565 return (size + 3) >> 2;
11566 else
11567 return (size + 7) >> 3;
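/* For example, a 10-byte argument occupies (10 + 7) >> 3 == 2 doublewords
   in 64-bit mode, but (10 + 3) >> 2 == 3 words in 32-bit mode.  */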
11570 /* Use this to flush pending int fields. */
11572 static void
11573 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11574 HOST_WIDE_INT bitpos, int final)
11576 unsigned int startbit, endbit;
11577 int intregs, intoffset;
11578 machine_mode mode;
11580 /* Handle the situations where a float is taking up the first half
11581 of the GPR, and the other half is empty (typically due to
11582 alignment restrictions). We can detect this by an 8-byte-aligned
11583 int field, or by seeing that this is the final flush for this
11584 argument. Count the word and continue on. */
11585 if (cum->floats_in_gpr == 1
11586 && (cum->intoffset % 64 == 0
11587 || (cum->intoffset == -1 && final)))
11589 cum->words++;
11590 cum->floats_in_gpr = 0;
11593 if (cum->intoffset == -1)
11594 return;
11596 intoffset = cum->intoffset;
11597 cum->intoffset = -1;
11598 cum->floats_in_gpr = 0;
11600 if (intoffset % BITS_PER_WORD != 0)
11602 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
11603 MODE_INT, 0);
11604 if (mode == BLKmode)
11606 /* We couldn't find an appropriate mode, which happens,
11607 e.g., in packed structs when there are 3 bytes to load.
11608 Move intoffset back to the beginning of the word in this
11609 case. */
11610 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11614 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11615 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11616 intregs = (endbit - startbit) / BITS_PER_WORD;
11617 cum->words += intregs;
11618 /* words should be unsigned. */
11619 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11621 int pad = (endbit/BITS_PER_WORD) - cum->words;
11622 cum->words += pad;
11626 /* The darwin64 ABI calls for us to recurse down through structs,
11627 looking for elements passed in registers. Unfortunately, we have
11628 to track int register count here also because of misalignments
11629 in powerpc alignment mode. */
11631 static void
11632 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11633 const_tree type,
11634 HOST_WIDE_INT startbitpos)
11636 tree f;
11638 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11639 if (TREE_CODE (f) == FIELD_DECL)
11641 HOST_WIDE_INT bitpos = startbitpos;
11642 tree ftype = TREE_TYPE (f);
11643 machine_mode mode;
11644 if (ftype == error_mark_node)
11645 continue;
11646 mode = TYPE_MODE (ftype);
11648 if (DECL_SIZE (f) != 0
11649 && tree_fits_uhwi_p (bit_position (f)))
11650 bitpos += int_bit_position (f);
11652 /* ??? FIXME: else assume zero offset. */
11654 if (TREE_CODE (ftype) == RECORD_TYPE)
11655 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11656 else if (USE_FP_FOR_ARG_P (cum, mode))
11658 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11659 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11660 cum->fregno += n_fpregs;
11661 /* Single-precision floats present a special problem for
11662 us, because they are smaller than an 8-byte GPR, and so
11663 the structure-packing rules combined with the standard
11664 varargs behavior mean that we want to pack float/float
11665 and float/int combinations into a single register's
11666 space. This is complicated by the arg advance flushing,
11667 which works on arbitrarily large groups of int-type
11668 fields. */
11669 if (mode == SFmode)
11671 if (cum->floats_in_gpr == 1)
11673 /* Two floats in a word; count the word and reset
11674 the float count. */
11675 cum->words++;
11676 cum->floats_in_gpr = 0;
11678 else if (bitpos % 64 == 0)
11680 /* A float at the beginning of an 8-byte word;
11681 count it and put off adjusting cum->words until
11682 we see if an arg advance flush is going to do it
11683 for us. */
11684 cum->floats_in_gpr++;
11686 else
11688 /* The float is at the end of a word, preceded
11689 by integer fields, so the arg advance flush
11690 just above has already set cum->words and
11691 everything is taken care of. */
11694 else
11695 cum->words += n_fpregs;
11697 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11699 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11700 cum->vregno++;
11701 cum->words += 2;
11703 else if (cum->intoffset == -1)
11704 cum->intoffset = bitpos;
11708 /* Check for an item that needs to be considered specially under the darwin 64
11709 bit ABI. These are record types where the mode is BLK or the structure is
11710 8 bytes in size. */
11711 static int
11712 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11714 return rs6000_darwin64_abi
11715 && ((mode == BLKmode
11716 && TREE_CODE (type) == RECORD_TYPE
11717 && int_size_in_bytes (type) > 0)
11718 || (type && TREE_CODE (type) == RECORD_TYPE
11719 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11722 /* Update the data in CUM to advance over an argument
11723 of mode MODE and data type TYPE.
11724 (TYPE is null for libcalls where that information may not be available.)
11726 Note that for args passed by reference, function_arg will be called
11727 with MODE and TYPE set to that of the pointer to the arg, not the arg
11728 itself. */
11730 static void
11731 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11732 const_tree type, bool named, int depth)
11734 machine_mode elt_mode;
11735 int n_elts;
11737 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11739 /* Only tick off an argument if we're not recursing. */
11740 if (depth == 0)
11741 cum->nargs_prototype--;
11743 #ifdef HAVE_AS_GNU_ATTRIBUTE
11744 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11745 && cum->escapes)
11747 if (SCALAR_FLOAT_MODE_P (mode))
11749 rs6000_passes_float = true;
11750 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11751 && (FLOAT128_IBM_P (mode)
11752 || FLOAT128_IEEE_P (mode)
11753 || (type != NULL
11754 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11755 rs6000_passes_long_double = true;
11757 if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11758 || (PAIRED_VECTOR_MODE (mode)
11759 && !cum->stdarg
11760 && cum->sysv_gregno <= GP_ARG_MAX_REG))
11761 rs6000_passes_vector = true;
11763 #endif
11765 if (TARGET_ALTIVEC_ABI
11766 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11767 || (type && TREE_CODE (type) == VECTOR_TYPE
11768 && int_size_in_bytes (type) == 16)))
11770 bool stack = false;
11772 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11774 cum->vregno += n_elts;
11776 if (!TARGET_ALTIVEC)
11777 error ("cannot pass argument in vector register because"
11778 " altivec instructions are disabled, use %qs"
11779 " to enable them", "-maltivec");
11781 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11782 even if it is going to be passed in a vector register.
11783 Darwin does the same for variable-argument functions. */
11784 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11785 && TARGET_64BIT)
11786 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11787 stack = true;
11789 else
11790 stack = true;
11792 if (stack)
11794 int align;
11796 /* Vector parameters must be 16-byte aligned. In 32-bit
11797 mode this means we need to take into account the offset
11798 to the parameter save area. In 64-bit mode, they just
11799 have to start on an even word, since the parameter save
11800 area is 16-byte aligned. */
11801 if (TARGET_32BIT)
11802 align = -(rs6000_parm_offset () + cum->words) & 3;
11803 else
11804 align = cum->words & 1;
11805 cum->words += align + rs6000_arg_size (mode, type);
11807 if (TARGET_DEBUG_ARG)
11809 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11810 cum->words, align);
11811 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11812 cum->nargs_prototype, cum->prototype,
11813 GET_MODE_NAME (mode));
11817 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11819 int size = int_size_in_bytes (type);
11820 /* Variable sized types have size == -1 and are
11821 treated as if consisting entirely of ints.
11822 Pad to 16 byte boundary if needed. */
11823 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11824 && (cum->words % 2) != 0)
11825 cum->words++;
11826 /* For varargs, we can just go up by the size of the struct. */
11827 if (!named)
11828 cum->words += (size + 7) / 8;
11829 else
11831 /* It is tempting to say int register count just goes up by
11832 sizeof(type)/8, but this is wrong in a case such as
11833 { int; double; int; } [powerpc alignment]. We have to
11834 grovel through the fields for these too. */
11835 cum->intoffset = 0;
11836 cum->floats_in_gpr = 0;
11837 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11838 rs6000_darwin64_record_arg_advance_flush (cum,
11839 size * BITS_PER_UNIT, 1);
11841 if (TARGET_DEBUG_ARG)
11843 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11844 cum->words, TYPE_ALIGN (type), size);
11845 fprintf (stderr,
11846 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11847 cum->nargs_prototype, cum->prototype,
11848 GET_MODE_NAME (mode));
11851 else if (DEFAULT_ABI == ABI_V4)
11853 if (abi_v4_pass_in_fpr (mode))
11855 /* _Decimal128 must use an even/odd register pair. This assumes
11856 that the register number is odd when fregno is odd. */
11857 if (mode == TDmode && (cum->fregno % 2) == 1)
11858 cum->fregno++;
11860 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11861 <= FP_ARG_V4_MAX_REG)
11862 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11863 else
11865 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11866 if (mode == DFmode || FLOAT128_IBM_P (mode)
11867 || mode == DDmode || mode == TDmode)
11868 cum->words += cum->words & 1;
11869 cum->words += rs6000_arg_size (mode, type);
11872 else
11874 int n_words = rs6000_arg_size (mode, type);
11875 int gregno = cum->sysv_gregno;
11877 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11878 As does any other 2 word item such as complex int due to a
11879 historical mistake. */
11880 if (n_words == 2)
11881 gregno += (1 - gregno) & 1;
11883 /* Multi-reg args are not split between registers and stack. */
11884 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11886 /* Long long is aligned on the stack. So are other 2 word
11887 items such as complex int due to a historical mistake. */
11888 if (n_words == 2)
11889 cum->words += cum->words & 1;
11890 cum->words += n_words;
11893 /* Note: we continue to accumulate gregno even after we've started
11894 spilling to the stack; this tells expand_builtin_saveregs
11895 that spilling has started. */
11896 cum->sysv_gregno = gregno + n_words;
11899 if (TARGET_DEBUG_ARG)
11901 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11902 cum->words, cum->fregno);
11903 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11904 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11905 fprintf (stderr, "mode = %4s, named = %d\n",
11906 GET_MODE_NAME (mode), named);
11909 else
11911 int n_words = rs6000_arg_size (mode, type);
11912 int start_words = cum->words;
11913 int align_words = rs6000_parm_start (mode, type, start_words);
11915 cum->words = align_words + n_words;
11917 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11919 /* _Decimal128 must be passed in an even/odd float register pair.
11920 This assumes that the register number is odd when fregno is
11921 odd. */
11922 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11923 cum->fregno++;
11924 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11927 if (TARGET_DEBUG_ARG)
11929 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11930 cum->words, cum->fregno);
11931 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11932 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11933 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11934 named, align_words - start_words, depth);
11939 static void
11940 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11941 const_tree type, bool named)
11943 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11947 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11948 structure between cum->intoffset and bitpos to integer registers. */
11950 static void
11951 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11952 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11954 machine_mode mode;
11955 unsigned int regno;
11956 unsigned int startbit, endbit;
11957 int this_regno, intregs, intoffset;
11958 rtx reg;
11960 if (cum->intoffset == -1)
11961 return;
11963 intoffset = cum->intoffset;
11964 cum->intoffset = -1;
11966 /* If this is the trailing part of a word, try to only load that
11967 much into the register. Otherwise load the whole register. Note
11968 that in the latter case we may pick up unwanted bits. It's not a
11969 problem at the moment, but we may wish to revisit this. */
11971 if (intoffset % BITS_PER_WORD != 0)
11973 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
11974 MODE_INT, 0);
11975 if (mode == BLKmode)
11977 /* We couldn't find an appropriate mode, which happens,
11978 e.g., in packed structs when there are 3 bytes to load.
11979 Move intoffset back to the beginning of the word in this
11980 case. */
11981 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11982 mode = word_mode;
11985 else
11986 mode = word_mode;
11988 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11989 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11990 intregs = (endbit - startbit) / BITS_PER_WORD;
11991 this_regno = cum->words + intoffset / BITS_PER_WORD;
11993 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11994 cum->use_stack = 1;
11996 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11997 if (intregs <= 0)
11998 return;
12000 intoffset /= BITS_PER_UNIT;
12001 do
12003 regno = GP_ARG_MIN_REG + this_regno;
12004 reg = gen_rtx_REG (mode, regno);
12005 rvec[(*k)++] =
12006 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
12008 this_regno += 1;
12009 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
12010 mode = word_mode;
12011 intregs -= 1;
12013 while (intregs > 0);
12016 /* Recursive workhorse for the following. */
12018 static void
12019 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
12020 HOST_WIDE_INT startbitpos, rtx rvec[],
12021 int *k)
12023 tree f;
12025 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
12026 if (TREE_CODE (f) == FIELD_DECL)
12028 HOST_WIDE_INT bitpos = startbitpos;
12029 tree ftype = TREE_TYPE (f);
12030 machine_mode mode;
12031 if (ftype == error_mark_node)
12032 continue;
12033 mode = TYPE_MODE (ftype);
12035 if (DECL_SIZE (f) != 0
12036 && tree_fits_uhwi_p (bit_position (f)))
12037 bitpos += int_bit_position (f);
12039 /* ??? FIXME: else assume zero offset. */
12041 if (TREE_CODE (ftype) == RECORD_TYPE)
12042 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
12043 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
12045 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
12046 #if 0
12047 switch (mode)
12049 case E_SCmode: mode = SFmode; break;
12050 case E_DCmode: mode = DFmode; break;
12051 case E_TCmode: mode = TFmode; break;
12052 default: break;
12054 #endif
12055 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12056 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
12058 gcc_assert (cum->fregno == FP_ARG_MAX_REG
12059 && (mode == TFmode || mode == TDmode));
12060 /* Long double or _Decimal128 split over regs and memory. */
12061 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
12062 cum->use_stack = 1;
12064 rvec[(*k)++]
12065 = gen_rtx_EXPR_LIST (VOIDmode,
12066 gen_rtx_REG (mode, cum->fregno++),
12067 GEN_INT (bitpos / BITS_PER_UNIT));
12068 if (FLOAT128_2REG_P (mode))
12069 cum->fregno++;
12071 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12073 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12074 rvec[(*k)++]
12075 = gen_rtx_EXPR_LIST (VOIDmode,
12076 gen_rtx_REG (mode, cum->vregno++),
12077 GEN_INT (bitpos / BITS_PER_UNIT));
12079 else if (cum->intoffset == -1)
12080 cum->intoffset = bitpos;
12084 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
12085 the register(s) to be used for each field and subfield of a struct
12086 being passed by value, along with the offset of where the
12087 register's value may be found in the block. FP fields go in FP
12088 register, vector fields go in vector registers, and everything
12089 else goes in int registers, packed as in memory.
12091 This code is also used for function return values. RETVAL indicates
12092 whether this is the case.
12094 Much of this is taken from the SPARC V9 port, which has a similar
12095 calling convention. */
12097 static rtx
12098 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
12099 bool named, bool retval)
12101 rtx rvec[FIRST_PSEUDO_REGISTER];
12102 int k = 1, kbase = 1;
12103 HOST_WIDE_INT typesize = int_size_in_bytes (type);
12104 /* This is a copy; modifications are not visible to our caller. */
12105 CUMULATIVE_ARGS copy_cum = *orig_cum;
12106 CUMULATIVE_ARGS *cum = &copy_cum;
12108 /* Pad to 16 byte boundary if needed. */
12109 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12110 && (cum->words % 2) != 0)
12111 cum->words++;
12113 cum->intoffset = 0;
12114 cum->use_stack = 0;
12115 cum->named = named;
12117 /* Put entries into rvec[] for individual FP and vector fields, and
12118 for the chunks of memory that go in int regs. Note we start at
12119 element 1; 0 is reserved for an indication of using memory, and
12120 may or may not be filled in below. */
12121 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
12122 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
12124 /* If any part of the struct went on the stack put all of it there.
12125 This hack is because the generic code for
12126 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
12127 parts of the struct are not at the beginning. */
12128 if (cum->use_stack)
12130 if (retval)
12131 return NULL_RTX; /* doesn't go in registers at all */
12132 kbase = 0;
12133 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12135 if (k > 1 || cum->use_stack)
12136 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
12137 else
12138 return NULL_RTX;
12141 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
12143 static rtx
12144 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
12145 int align_words)
12147 int n_units;
12148 int i, k;
12149 rtx rvec[GP_ARG_NUM_REG + 1];
12151 if (align_words >= GP_ARG_NUM_REG)
12152 return NULL_RTX;
12154 n_units = rs6000_arg_size (mode, type);
12156 /* Optimize the simple case where the arg fits in one gpr, except in
12157 the case of BLKmode due to assign_parms assuming that registers are
12158 BITS_PER_WORD wide. */
12159 if (n_units == 0
12160 || (n_units == 1 && mode != BLKmode))
12161 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12163 k = 0;
12164 if (align_words + n_units > GP_ARG_NUM_REG)
12165 /* Not all of the arg fits in gprs. Say that it goes in memory too,
12166 using a magic NULL_RTX component.
12167 This is not strictly correct. Only some of the arg belongs in
12168 memory, not all of it. However, the normal scheme using
12169 function_arg_partial_nregs can result in unusual subregs, eg.
12170 (subreg:SI (reg:DF) 4), which are not handled well. The code to
12171 store the whole arg to memory is often more efficient than code
12172 to store pieces, and we know that space is available in the right
12173 place for the whole arg. */
12174 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12176 i = 0;
12177 do
12179 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
12180 rtx off = GEN_INT (i++ * 4);
12181 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12183 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
12185 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
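/* As a sketch of the result (assuming GP_ARG_MIN_REG is r3): a DFmode
   argument with align_words == 7 under -m32 -mpowerpc64 has n_units == 2
   but only one GPR left, so the PARALLEL is roughly

     (parallel:DF [(expr_list (nil) (const_int 0))
                   (expr_list (reg:SI r10) (const_int 0))])

   i.e. the first half of the value in r10, with the whole argument also
   written to memory via the magic NULL_RTX element.  */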
12188 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
12189 but must also be copied into the parameter save area starting at
12190 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
12191 to the GPRs and/or memory. Return the number of elements used. */
12193 static int
12194 rs6000_psave_function_arg (machine_mode mode, const_tree type,
12195 int align_words, rtx *rvec)
12197 int k = 0;
12199 if (align_words < GP_ARG_NUM_REG)
12201 int n_words = rs6000_arg_size (mode, type);
12203 if (align_words + n_words > GP_ARG_NUM_REG
12204 || mode == BLKmode
12205 || (TARGET_32BIT && TARGET_POWERPC64))
12207 /* If this is partially on the stack, then we only
12208 include the portion actually in registers here. */
12209 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12210 int i = 0;
12212 if (align_words + n_words > GP_ARG_NUM_REG)
12214 /* Not all of the arg fits in gprs. Say that it goes in memory
12215 too, using a magic NULL_RTX component. Also see comment in
12216 rs6000_mixed_function_arg for why the normal
12217 function_arg_partial_nregs scheme doesn't work in this case. */
12218 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12221 do
12223 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12224 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12225 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12227 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12229 else
12231 /* The whole arg fits in gprs. */
12232 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12233 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12236 else
12238 /* It's entirely in memory. */
12239 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12242 return k;
12245 /* RVEC is a vector of K components of an argument of mode MODE.
12246 Construct the final function_arg return value from it. */
12248 static rtx
12249 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12251 gcc_assert (k >= 1);
12253 /* Avoid returning a PARALLEL in the trivial cases. */
12254 if (k == 1)
12256 if (XEXP (rvec[0], 0) == NULL_RTX)
12257 return NULL_RTX;
12259 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12260 return XEXP (rvec[0], 0);
12263 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12266 /* Determine where to put an argument to a function.
12267 Value is zero to push the argument on the stack,
12268 or a hard register in which to store the argument.
12270 MODE is the argument's machine mode.
12271 TYPE is the data type of the argument (as a tree).
12272 This is null for libcalls where that information may
12273 not be available.
12274 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12275 the preceding args and about the function being called. It is
12276 not modified in this routine.
12277 NAMED is nonzero if this argument is a named parameter
12278 (otherwise it is an extra parameter matching an ellipsis).
12280 On RS/6000 the first eight words of non-FP are normally in registers
12281 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12282 Under V.4, the first 8 FP args are in registers.
12284 If this is floating-point and no prototype is specified, we use
12285 both an FP and integer register (or possibly FP reg and stack). Library
12286 functions (when CALL_LIBCALL is set) always have the proper types for args,
12287 so we can pass the FP value just in one register. emit_library_function
12288 doesn't support PARALLEL anyway.
12290 Note that for args passed by reference, function_arg will be called
12291 with MODE and TYPE set to that of the pointer to the arg, not the arg
12292 itself. */
12294 static rtx
12295 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12296 const_tree type, bool named)
12298 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12299 enum rs6000_abi abi = DEFAULT_ABI;
12300 machine_mode elt_mode;
12301 int n_elts;
12303 /* Return a marker to indicate whether CR1 needs to set or clear the
12304 bit that V.4 uses to say fp args were passed in registers.
12305 Assume that we don't need the marker for software floating point,
12306 or compiler generated library calls. */
12307 if (mode == VOIDmode)
12309 if (abi == ABI_V4
12310 && (cum->call_cookie & CALL_LIBCALL) == 0
12311 && (cum->stdarg
12312 || (cum->nargs_prototype < 0
12313 && (cum->prototype || TARGET_NO_PROTOTYPE)))
12314 && TARGET_HARD_FLOAT)
12315 return GEN_INT (cum->call_cookie
12316 | ((cum->fregno == FP_ARG_MIN_REG)
12317 ? CALL_V4_SET_FP_ARGS
12318 : CALL_V4_CLEAR_FP_ARGS));
12320 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12323 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12325 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12327 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12328 if (rslt != NULL_RTX)
12329 return rslt;
12330 /* Else fall through to usual handling. */
12333 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12335 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12336 rtx r, off;
12337 int i, k = 0;
12339 /* Do we also need to pass this argument in the parameter save area?
12340 Library support functions for IEEE 128-bit are assumed to not need the
12341 value passed both in GPRs and in vector registers. */
12342 if (TARGET_64BIT && !cum->prototype
12343 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12345 int align_words = ROUND_UP (cum->words, 2);
12346 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12349 /* Describe where this argument goes in the vector registers. */
12350 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
12352 r = gen_rtx_REG (elt_mode, cum->vregno + i);
12353 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12354 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12357 return rs6000_finish_function_arg (mode, rvec, k);
12359 else if (TARGET_ALTIVEC_ABI
12360 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
12361 || (type && TREE_CODE (type) == VECTOR_TYPE
12362 && int_size_in_bytes (type) == 16)))
12364 if (named || abi == ABI_V4)
12365 return NULL_RTX;
12366 else
12368 /* Vector parameters to varargs functions under AIX or Darwin
12369 get passed in memory and possibly also in GPRs. */
12370 int align, align_words, n_words;
12371 machine_mode part_mode;
12373 /* Vector parameters must be 16-byte aligned. In 32-bit
12374 mode this means we need to take into account the offset
12375 to the parameter save area. In 64-bit mode, they just
12376 have to start on an even word, since the parameter save
12377 area is 16-byte aligned. */
12378 if (TARGET_32BIT)
12379 align = -(rs6000_parm_offset () + cum->words) & 3;
12380 else
12381 align = cum->words & 1;
12382 align_words = cum->words + align;
12384 /* Out of registers? Memory, then. */
12385 if (align_words >= GP_ARG_NUM_REG)
12386 return NULL_RTX;
12388 if (TARGET_32BIT && TARGET_POWERPC64)
12389 return rs6000_mixed_function_arg (mode, type, align_words);
12391 /* The vector value goes in GPRs. Only the part of the
12392 value in GPRs is reported here. */
12393 part_mode = mode;
12394 n_words = rs6000_arg_size (mode, type);
12395 if (align_words + n_words > GP_ARG_NUM_REG)
12396 /* Fortunately, there are only two possibilities, the value
12397 is either wholly in GPRs or half in GPRs and half not. */
12398 part_mode = DImode;
12400 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
12404 else if (abi == ABI_V4)
12406 if (abi_v4_pass_in_fpr (mode))
12408 /* _Decimal128 must use an even/odd register pair. This assumes
12409 that the register number is odd when fregno is odd. */
12410 if (mode == TDmode && (cum->fregno % 2) == 1)
12411 cum->fregno++;
12413 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12414 <= FP_ARG_V4_MAX_REG)
12415 return gen_rtx_REG (mode, cum->fregno);
12416 else
12417 return NULL_RTX;
12419 else
12421 int n_words = rs6000_arg_size (mode, type);
12422 int gregno = cum->sysv_gregno;
12424 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
12425 As does any other 2 word item such as complex int due to a
12426 historical mistake. */
12427 if (n_words == 2)
12428 gregno += (1 - gregno) & 1;
12430 /* Multi-reg args are not split between registers and stack. */
12431 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12432 return NULL_RTX;
12434 if (TARGET_32BIT && TARGET_POWERPC64)
12435 return rs6000_mixed_function_arg (mode, type,
12436 gregno - GP_ARG_MIN_REG);
12437 return gen_rtx_REG (mode, gregno);
12440 else
12442 int align_words = rs6000_parm_start (mode, type, cum->words);
12444 /* _Decimal128 must be passed in an even/odd float register pair.
12445 This assumes that the register number is odd when fregno is odd. */
12446 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12447 cum->fregno++;
12449 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12451 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12452 rtx r, off;
12453 int i, k = 0;
12454 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12455 int fpr_words;
12457 /* Do we also need to pass this argument in the parameter
12458 save area? */
12459 if (type && (cum->nargs_prototype <= 0
12460 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12461 && TARGET_XL_COMPAT
12462 && align_words >= GP_ARG_NUM_REG)))
12463 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12465 /* Describe where this argument goes in the fprs. */
12466 for (i = 0; i < n_elts
12467 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12469 /* Check if the argument is split over registers and memory.
12470 This can only ever happen for long double or _Decimal128;
12471 complex types are handled via split_complex_arg. */
12472 machine_mode fmode = elt_mode;
12473 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12475 gcc_assert (FLOAT128_2REG_P (fmode));
12476 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12479 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12480 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12481 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12484 /* If there were not enough FPRs to hold the argument, the rest
12485 usually goes into memory. However, if the current position
12486 is still within the register parameter area, a portion may
12487 actually have to go into GPRs.
12489 Note that it may happen that the portion of the argument
12490 passed in the first "half" of the first GPR was already
12491 passed in the last FPR as well.
12493 For unnamed arguments, we already set up GPRs to cover the
12494 whole argument in rs6000_psave_function_arg, so there is
12495 nothing further to do at this point. */
12496 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12497 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12498 && cum->nargs_prototype > 0)
12500 static bool warned;
12502 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12503 int n_words = rs6000_arg_size (mode, type);
12505 align_words += fpr_words;
12506 n_words -= fpr_words;
12508 do
12510 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12511 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12512 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12514 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12516 if (!warned && warn_psabi)
12518 warned = true;
12519 inform (input_location,
12520 "the ABI of passing homogeneous float aggregates"
12521 " has changed in GCC 5");
12525 return rs6000_finish_function_arg (mode, rvec, k);
12527 else if (align_words < GP_ARG_NUM_REG)
12529 if (TARGET_32BIT && TARGET_POWERPC64)
12530 return rs6000_mixed_function_arg (mode, type, align_words);
12532 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12534 else
12535 return NULL_RTX;
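/* A sketch of the ELFv2 homogeneous-aggregate path above: for a named,
   prototyped argument of type 'struct { double a, b; }' with two FPRs
   already in use (cum->fregno == 35, i.e. f3, assuming FPRs start at
   hard register 32), the loop emits roughly

     (parallel:BLK [(expr_list (reg:DF f3) (const_int 0))
                    (expr_list (reg:DF f4) (const_int 8))])

   placing each double in its own FPR along with its byte offset within
   the aggregate.  */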
12539 /* For an arg passed partly in registers and partly in memory, this is
12540 the number of bytes passed in registers. For args passed entirely in
12541 registers or entirely in memory, zero. When an arg is described by a
12542 PARALLEL, perhaps using more than one register type, this function
12543 returns the number of bytes used by the first element of the PARALLEL. */
12545 static int
12546 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12547 tree type, bool named)
12549 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12550 bool passed_in_gprs = true;
12551 int ret = 0;
12552 int align_words;
12553 machine_mode elt_mode;
12554 int n_elts;
12556 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12558 if (DEFAULT_ABI == ABI_V4)
12559 return 0;
12561 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12563 /* If we are passing this arg in the fixed parameter save area (gprs or
12564 memory) as well as VRs, we do not use the partial bytes mechanism;
12565 instead, rs6000_function_arg will return a PARALLEL including a memory
12566 element as necessary. Library support functions for IEEE 128-bit are
12567 assumed to not need the value passed both in GPRs and in vector
12568 registers. */
12569 if (TARGET_64BIT && !cum->prototype
12570 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12571 return 0;
12573 /* Otherwise, we pass in VRs only. Check for partial copies. */
12574 passed_in_gprs = false;
12575 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12576 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12579 /* In this complicated case we just disable the partial_nregs code. */
12580 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12581 return 0;
12583 align_words = rs6000_parm_start (mode, type, cum->words);
12585 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12587 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12589 /* If we are passing this arg in the fixed parameter save area
12590 (gprs or memory) as well as FPRs, we do not use the partial
12591 bytes mechanism; instead, rs6000_function_arg will return a
12592 PARALLEL including a memory element as necessary. */
12593 if (type
12594 && (cum->nargs_prototype <= 0
12595 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12596 && TARGET_XL_COMPAT
12597 && align_words >= GP_ARG_NUM_REG)))
12598 return 0;
12600 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12601 passed_in_gprs = false;
12602 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12604 /* Compute number of bytes / words passed in FPRs. If there
12605 is still space available in the register parameter area
12606 *after* that amount, a part of the argument will be passed
12607 in GPRs. In that case, the total amount passed in any
12608 registers is equal to the amount that would have been passed
12609 in GPRs if everything were passed there, so we fall back to
12610 the GPR code below to compute the appropriate value. */
12611 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12612 * MIN (8, GET_MODE_SIZE (elt_mode)));
12613 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12615 if (align_words + fpr_words < GP_ARG_NUM_REG)
12616 passed_in_gprs = true;
12617 else
12618 ret = fpr;
12622 if (passed_in_gprs
12623 && align_words < GP_ARG_NUM_REG
12624 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12625 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12627 if (ret != 0 && TARGET_DEBUG_ARG)
12628 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12630 return ret;
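/* Worked example: in 64-bit AIX mode, a 16-byte aggregate whose first
   word lands in the last GPR (align_words == 7, GP_ARG_NUM_REG == 8)
   gives ret = (8 - 7) * 8 == 8 -- eight bytes travel in r10 and the
   remaining eight go on the stack.  */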
12633 /* A C expression that indicates when an argument must be passed by
12634 reference. If nonzero for an argument, a copy of that argument is
12635 made in memory and a pointer to the argument is passed instead of
12636 the argument itself. The pointer is passed in whatever way is
12637 appropriate for passing a pointer to that type.
12639 Under V.4, aggregates and long double are passed by reference.
12641 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12642 reference unless the AltiVec vector extension ABI is in force.
12644 As an extension to all ABIs, variable sized types are passed by
12645 reference. */
12647 static bool
12648 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12649 machine_mode mode, const_tree type,
12650 bool named ATTRIBUTE_UNUSED)
12652 if (!type)
12653 return 0;
12655 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12656 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12658 if (TARGET_DEBUG_ARG)
12659 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12660 return 1;
12663 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12665 if (TARGET_DEBUG_ARG)
12666 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12667 return 1;
12670 if (int_size_in_bytes (type) < 0)
12672 if (TARGET_DEBUG_ARG)
12673 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12674 return 1;
12677 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12678 modes only exist for GCC vector types if -maltivec. */
12679 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12681 if (TARGET_DEBUG_ARG)
12682 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12683 return 1;
12686 /* Pass synthetic vectors in memory. */
12687 if (TREE_CODE (type) == VECTOR_TYPE
12688 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12690 static bool warned_for_pass_big_vectors = false;
12691 if (TARGET_DEBUG_ARG)
12692 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12693 if (!warned_for_pass_big_vectors)
12695 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12696 "non-standard ABI extension with no compatibility "
12697 "guarantee");
12698 warned_for_pass_big_vectors = true;
12700 return 1;
12703 return 0;
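/* Summary examples for the checks above:

     struct s { int i; } under ABI_V4          by reference (V4 aggregate)
     any variably sized type                   by reference, every ABI
     int __attribute__((vector_size (32)))     by reference, with a
                                               one-time -Wpsabi warning
     double, or vector int with the AltiVec
     ABI in force                              by value  */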
12706 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12707 already processed. Return true if the parameter must be passed
12708 (fully or partially) on the stack. */
12710 static bool
12711 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12713 machine_mode mode;
12714 int unsignedp;
12715 rtx entry_parm;
12717 /* Catch errors. */
12718 if (type == NULL || type == error_mark_node)
12719 return true;
12721 /* Handle types with no storage requirement. */
12722 if (TYPE_MODE (type) == VOIDmode)
12723 return false;
12725 /* Handle complex types: each half is passed as a separate argument of the element type, so check (and advance past) it twice. */
12726 if (TREE_CODE (type) == COMPLEX_TYPE)
12727 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12728 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12730 /* Handle transparent aggregates. */
12731 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12732 && TYPE_TRANSPARENT_AGGR (type))
12733 type = TREE_TYPE (first_field (type));
12735 /* See if this arg was passed by invisible reference. */
12736 if (pass_by_reference (get_cumulative_args (args_so_far),
12737 TYPE_MODE (type), type, true))
12738 type = build_pointer_type (type);
12740 /* Find mode as it is passed by the ABI. */
12741 unsignedp = TYPE_UNSIGNED (type);
12742 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12744 /* If we must pass in stack, we need a stack. */
12745 if (rs6000_must_pass_in_stack (mode, type))
12746 return true;
12748 /* If there is no incoming register, we need a stack. */
12749 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12750 if (entry_parm == NULL)
12751 return true;
12753 /* Likewise if we need to pass both in registers and on the stack. */
12754 if (GET_CODE (entry_parm) == PARALLEL
12755 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12756 return true;
12758 /* Also true if we're partially in registers and partially not. */
12759 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12760 return true;
12762 /* Update info on where next arg arrives in registers. */
12763 rs6000_function_arg_advance (args_so_far, mode, type, true);
12764 return false;
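/* For example, the COMPLEX_TYPE case above treats "double _Complex" as
   two consecutive "double" halves, advancing ARGS_SO_FAR once per half,
   so the argument needs the stack as soon as either half does.  */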
12767 /* Return true if FUN has no prototype, has a variable argument
12768 list, or passes any parameter in memory. */
12770 static bool
12771 rs6000_function_parms_need_stack (tree fun, bool incoming)
12773 tree fntype, result;
12774 CUMULATIVE_ARGS args_so_far_v;
12775 cumulative_args_t args_so_far;
12777 if (!fun)
12778 /* Must be a libcall, all of which only use reg parms. */
12779 return false;
12781 fntype = fun;
12782 if (!TYPE_P (fun))
12783 fntype = TREE_TYPE (fun);
12785 /* Varargs functions need the parameter save area. */
12786 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12787 return true;
12789 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12790 args_so_far = pack_cumulative_args (&args_so_far_v);
12792 /* When incoming, we will have been passed the function decl.
12793 It is necessary to use the decl to handle K&R style functions,
12794 where TYPE_ARG_TYPES may not be available. */
12795 if (incoming)
12797 gcc_assert (DECL_P (fun));
12798 result = DECL_RESULT (fun);
12800 else
12801 result = TREE_TYPE (fntype);
12803 if (result && aggregate_value_p (result, fntype))
12805 if (!TYPE_P (result))
12806 result = TREE_TYPE (result);
12807 result = build_pointer_type (result);
12808 rs6000_parm_needs_stack (args_so_far, result);
12811 if (incoming)
12813 tree parm;
12815 for (parm = DECL_ARGUMENTS (fun);
12816 parm && parm != void_list_node;
12817 parm = TREE_CHAIN (parm))
12818 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12819 return true;
12821 else
12823 function_args_iterator args_iter;
12824 tree arg_type;
12826 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12827 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12828 return true;
12831 return false;
12834 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12835 usually a constant depending on the ABI. However, in the ELFv2 ABI
12836 the register parameter area is optional when calling a function that
12837 has a prototype in scope, has no variable argument list, and passes
12838 all parameters in registers. */
12840 int
12841 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12843 int reg_parm_stack_space;
12845 switch (DEFAULT_ABI)
12847 default:
12848 reg_parm_stack_space = 0;
12849 break;
12851 case ABI_AIX:
12852 case ABI_DARWIN:
12853 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12854 break;
12856 case ABI_ELFv2:
12857 /* ??? Recomputing this every time is a bit expensive. Is there
12858 a place to cache this information? */
12859 if (rs6000_function_parms_need_stack (fun, incoming))
12860 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12861 else
12862 reg_parm_stack_space = 0;
12863 break;
12866 return reg_parm_stack_space;
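/* For example, given the switch above: a 64-bit ELFv2 call to a
   prototyped "int f (int, int)" reserves no register parameter save
   area (0 bytes), whereas the same call under ABI_AIX always reserves
   64 bytes (32 bytes for 32-bit AIX/Darwin).  */

/* Copy NREGS consecutive word-sized registers, starting at REGNO, into
   successive words of memory X.  */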
12869 static void
12870 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12872 int i;
12873 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12875 if (nregs == 0)
12876 return;
12878 for (i = 0; i < nregs; i++)
12880 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12881 if (reload_completed)
12883 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12884 tem = NULL_RTX;
12885 else
12886 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12887 i * GET_MODE_SIZE (reg_mode));
12889 else
12890 tem = replace_equiv_address (tem, XEXP (tem, 0));
12892 gcc_assert (tem);
12894 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12898 /* Perform any actions needed for a function that is receiving a
12899 variable number of arguments.
12901 CUM is as above.
12903 MODE and TYPE are the mode and type of the current parameter.
12905 PRETEND_SIZE is a variable that should be set to the amount of stack
12906 that must be pushed by the prolog to pretend that our caller pushed
12907 it.
12909 Normally, this macro will push all remaining incoming registers on the
12910 stack and set PRETEND_SIZE to the length of the registers pushed. */
12912 static void
12913 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12914 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12915 int no_rtl)
12917 CUMULATIVE_ARGS next_cum;
12918 int reg_size = TARGET_32BIT ? 4 : 8;
12919 rtx save_area = NULL_RTX, mem;
12920 int first_reg_offset;
12921 alias_set_type set;
12923 /* Skip the last named argument. */
12924 next_cum = *get_cumulative_args (cum);
12925 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12927 if (DEFAULT_ABI == ABI_V4)
12929 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12931 if (! no_rtl)
12933 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12934 HOST_WIDE_INT offset = 0;
12936 /* Try to optimize the size of the varargs save area.
12937 The ABI requires that ap.reg_save_area is doubleword
12938 aligned, but we don't need to allocate space for all
12939 the bytes, only those to which we actually will save
12940 anything. */
12941 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12942 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12943 if (TARGET_HARD_FLOAT
12944 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12945 && cfun->va_list_fpr_size)
12947 if (gpr_reg_num)
12948 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12949 * UNITS_PER_FP_WORD;
12950 if (cfun->va_list_fpr_size
12951 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12952 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12953 else
12954 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12955 * UNITS_PER_FP_WORD;
12957 if (gpr_reg_num)
12959 offset = -((first_reg_offset * reg_size) & ~7);
12960 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12962 gpr_reg_num = cfun->va_list_gpr_size;
12963 if (reg_size == 4 && (first_reg_offset & 1))
12964 gpr_reg_num++;
12966 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12968 else if (fpr_size)
12969 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12970 * UNITS_PER_FP_WORD
12971 - (int) (GP_ARG_NUM_REG * reg_size);
12973 if (gpr_size + fpr_size)
12975 rtx reg_save_area
12976 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12977 gcc_assert (GET_CODE (reg_save_area) == MEM);
12978 reg_save_area = XEXP (reg_save_area, 0);
12979 if (GET_CODE (reg_save_area) == PLUS)
12981 gcc_assert (XEXP (reg_save_area, 0)
12982 == virtual_stack_vars_rtx);
12983 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
12984 offset += INTVAL (XEXP (reg_save_area, 1));
12986 else
12987 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12990 cfun->machine->varargs_save_offset = offset;
12991 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12994 else
12996 first_reg_offset = next_cum.words;
12997 save_area = crtl->args.internal_arg_pointer;
12999 if (targetm.calls.must_pass_in_stack (mode, type))
13000 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
13003 set = get_varargs_alias_set ();
13004 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
13005 && cfun->va_list_gpr_size)
13007 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
13009 if (va_list_gpr_counter_field)
13010 /* V4 va_list_gpr_size counts number of registers needed. */
13011 n_gpr = cfun->va_list_gpr_size;
13012 else
13013 /* char * va_list instead counts number of bytes needed. */
13014 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
13016 if (nregs > n_gpr)
13017 nregs = n_gpr;
13019 mem = gen_rtx_MEM (BLKmode,
13020 plus_constant (Pmode, save_area,
13021 first_reg_offset * reg_size));
13022 MEM_NOTRAP_P (mem) = 1;
13023 set_mem_alias_set (mem, set);
13024 set_mem_align (mem, BITS_PER_WORD);
13026 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
13027 nregs);
13030 /* Save FP registers if needed. */
13031 if (DEFAULT_ABI == ABI_V4
13032 && TARGET_HARD_FLOAT
13033 && ! no_rtl
13034 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13035 && cfun->va_list_fpr_size)
13037 int fregno = next_cum.fregno, nregs;
13038 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
13039 rtx lab = gen_label_rtx ();
13040 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
13041 * UNITS_PER_FP_WORD);
13043 emit_jump_insn
13044 (gen_rtx_SET (pc_rtx,
13045 gen_rtx_IF_THEN_ELSE (VOIDmode,
13046 gen_rtx_NE (VOIDmode, cr1,
13047 const0_rtx),
13048 gen_rtx_LABEL_REF (VOIDmode, lab),
13049 pc_rtx)));
13051 for (nregs = 0;
13052 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
13053 fregno++, off += UNITS_PER_FP_WORD, nregs++)
13055 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13056 ? DFmode : SFmode,
13057 plus_constant (Pmode, save_area, off));
13058 MEM_NOTRAP_P (mem) = 1;
13059 set_mem_alias_set (mem, set);
13060 set_mem_align (mem, GET_MODE_ALIGNMENT (
13061 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13062 ? DFmode : SFmode));
13063 emit_move_insn (mem, gen_rtx_REG (
13064 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13065 ? DFmode : SFmode, fregno));
13068 emit_label (lab);
13072 /* Create the va_list data type. */
13074 static tree
13075 rs6000_build_builtin_va_list (void)
13077 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
13079 /* For AIX, prefer 'char *' because that's what the system
13080 header files like. */
13081 if (DEFAULT_ABI != ABI_V4)
13082 return build_pointer_type (char_type_node);
13084 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
13085 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
13086 get_identifier ("__va_list_tag"), record);
13088 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
13089 unsigned_char_type_node);
13090 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
13091 unsigned_char_type_node);
13092 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
13093 every user file. */
13094 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13095 get_identifier ("reserved"), short_unsigned_type_node);
13096 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13097 get_identifier ("overflow_arg_area"),
13098 ptr_type_node);
13099 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13100 get_identifier ("reg_save_area"),
13101 ptr_type_node);
13103 va_list_gpr_counter_field = f_gpr;
13104 va_list_fpr_counter_field = f_fpr;
13106 DECL_FIELD_CONTEXT (f_gpr) = record;
13107 DECL_FIELD_CONTEXT (f_fpr) = record;
13108 DECL_FIELD_CONTEXT (f_res) = record;
13109 DECL_FIELD_CONTEXT (f_ovf) = record;
13110 DECL_FIELD_CONTEXT (f_sav) = record;
13112 TYPE_STUB_DECL (record) = type_decl;
13113 TYPE_NAME (record) = type_decl;
13114 TYPE_FIELDS (record) = f_gpr;
13115 DECL_CHAIN (f_gpr) = f_fpr;
13116 DECL_CHAIN (f_fpr) = f_res;
13117 DECL_CHAIN (f_res) = f_ovf;
13118 DECL_CHAIN (f_ovf) = f_sav;
13120 layout_type (record);
13122 /* The correct type is an array type of one element. */
13123 return build_array_type (record, build_index_type (size_zero_node));
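/* For reference, the record laid out above matches the 32-bit SVR4
   va_list (the typedef name below is only an illustration):

     typedef struct __va_list_tag {
       unsigned char gpr;        -- GP arg registers consumed so far
       unsigned char fpr;        -- FP arg registers consumed so far
       unsigned short reserved;  -- the named padding
       void *overflow_arg_area;  -- address of the next stack argument
       void *reg_save_area;      -- start of the register save area
     } __va_list[1];             -- one-element array, as returned above
*/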
13126 /* Implement va_start. */
13128 static void
13129 rs6000_va_start (tree valist, rtx nextarg)
13131 HOST_WIDE_INT words, n_gpr, n_fpr;
13132 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13133 tree gpr, fpr, ovf, sav, t;
13135 /* Only SVR4 needs something special. */
13136 if (DEFAULT_ABI != ABI_V4)
13138 std_expand_builtin_va_start (valist, nextarg);
13139 return;
13142 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13143 f_fpr = DECL_CHAIN (f_gpr);
13144 f_res = DECL_CHAIN (f_fpr);
13145 f_ovf = DECL_CHAIN (f_res);
13146 f_sav = DECL_CHAIN (f_ovf);
13148 valist = build_simple_mem_ref (valist);
13149 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13150 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13151 f_fpr, NULL_TREE);
13152 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13153 f_ovf, NULL_TREE);
13154 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13155 f_sav, NULL_TREE);
13157 /* Count number of gp and fp argument registers used. */
13158 words = crtl->args.info.words;
13159 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
13160 GP_ARG_NUM_REG);
13161 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
13162 FP_ARG_NUM_REG);
13164 if (TARGET_DEBUG_ARG)
13165 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
13166 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
13167 words, n_gpr, n_fpr);
13169 if (cfun->va_list_gpr_size)
13171 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13172 build_int_cst (NULL_TREE, n_gpr));
13173 TREE_SIDE_EFFECTS (t) = 1;
13174 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13177 if (cfun->va_list_fpr_size)
13179 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13180 build_int_cst (NULL_TREE, n_fpr));
13181 TREE_SIDE_EFFECTS (t) = 1;
13182 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13184 #ifdef HAVE_AS_GNU_ATTRIBUTE
13185 if (call_ABI_of_interest (cfun->decl))
13186 rs6000_passes_float = true;
13187 #endif
13190 /* Find the overflow area. */
13191 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13192 if (words != 0)
13193 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13194 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13195 TREE_SIDE_EFFECTS (t) = 1;
13196 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13198 /* If there were no va_arg invocations, don't set up the register
13199 save area. */
13200 if (!cfun->va_list_gpr_size
13201 && !cfun->va_list_fpr_size
13202 && n_gpr < GP_ARG_NUM_REG
13203 && n_fpr < FP_ARG_V4_MAX_REG)
13204 return;
13206 /* Find the register save area. */
13207 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13208 if (cfun->machine->varargs_save_offset)
13209 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13210 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13211 TREE_SIDE_EFFECTS (t) = 1;
13212 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
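/* In effect, for ABI_V4 the expansion above behaves like this pseudo-C
   (field names from rs6000_build_builtin_va_list):

     ap->gpr = n_gpr;                  -- GPRs used by named args
     ap->fpr = n_fpr;                  -- FPRs used by named args
     ap->overflow_arg_area = incoming_args + words * MIN_UNITS_PER_WORD;
     ap->reg_save_area = frame_base + varargs_save_offset;
*/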
13215 /* Implement va_arg. */
13217 static tree
13218 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13219 gimple_seq *post_p)
13221 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13222 tree gpr, fpr, ovf, sav, reg, t, u;
13223 int size, rsize, n_reg, sav_ofs, sav_scale;
13224 tree lab_false, lab_over, addr;
13225 int align;
13226 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13227 int regalign = 0;
13228 gimple *stmt;
13230 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13232 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13233 return build_va_arg_indirect_ref (t);
13236 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13237 earlier version of gcc, with the property that it always applied alignment
13238 adjustments to the va-args (even for zero-sized types). The cheapest way
13239 to deal with this is to replicate the effect of the part of
13240 std_gimplify_va_arg_expr that carries out the align adjust, for the case
13241 of relevance.
13242 We don't need to check for pass-by-reference because of the test above.
13243 We can return a simplified answer, since we know there's no offset to add. */
13245 if (((TARGET_MACHO
13246 && rs6000_darwin64_abi)
13247 || DEFAULT_ABI == ABI_ELFv2
13248 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13249 && integer_zerop (TYPE_SIZE (type)))
13251 unsigned HOST_WIDE_INT align, boundary;
13252 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13253 align = PARM_BOUNDARY / BITS_PER_UNIT;
13254 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13255 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13256 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13257 boundary /= BITS_PER_UNIT;
13258 if (boundary > align)
13260 tree t;
13261 /* This updates arg ptr by the amount that would be necessary
13262 to align the zero-sized (but not zero-alignment) item. */
13263 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13264 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13265 gimplify_and_add (t, pre_p);
13267 t = fold_convert (sizetype, valist_tmp);
13268 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13269 fold_convert (TREE_TYPE (valist),
13270 fold_build2 (BIT_AND_EXPR, sizetype, t,
13271 size_int (-boundary))));
13272 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13273 gimplify_and_add (t, pre_p);
13275 /* Since it is zero-sized there's no increment for the item itself. */
13276 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13277 return build_va_arg_indirect_ref (valist_tmp);
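/* A sketch of the adjustment made above, for a zero-sized type whose
   boundary is 16 bytes:

     ap = (ap + 15) & -16;   -- round the arg pointer up

   with no further increment, since the item itself occupies no space.  */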
13280 if (DEFAULT_ABI != ABI_V4)
13282 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13284 tree elem_type = TREE_TYPE (type);
13285 machine_mode elem_mode = TYPE_MODE (elem_type);
13286 int elem_size = GET_MODE_SIZE (elem_mode);
13288 if (elem_size < UNITS_PER_WORD)
13290 tree real_part, imag_part;
13291 gimple_seq post = NULL;
13293 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13294 &post);
13295 /* Copy the value into a temporary, lest the formal temporary
13296 be reused out from under us. */
13297 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13298 gimple_seq_add_seq (pre_p, post);
13300 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13301 post_p);
13303 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13307 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13310 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13311 f_fpr = DECL_CHAIN (f_gpr);
13312 f_res = DECL_CHAIN (f_fpr);
13313 f_ovf = DECL_CHAIN (f_res);
13314 f_sav = DECL_CHAIN (f_ovf);
13316 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13317 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13318 f_fpr, NULL_TREE);
13319 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13320 f_ovf, NULL_TREE);
13321 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13322 f_sav, NULL_TREE);
13324 size = int_size_in_bytes (type);
13325 rsize = (size + 3) / 4;
13326 int pad = 4 * rsize - size;
13327 align = 1;
13329 machine_mode mode = TYPE_MODE (type);
13330 if (abi_v4_pass_in_fpr (mode))
13332 /* FP args go in FP registers, if present. */
13333 reg = fpr;
13334 n_reg = (size + 7) / 8;
13335 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
13336 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
13337 if (mode != SFmode && mode != SDmode)
13338 align = 8;
13340 else
13342 /* Otherwise into GP registers. */
13343 reg = gpr;
13344 n_reg = rsize;
13345 sav_ofs = 0;
13346 sav_scale = 4;
13347 if (n_reg == 2)
13348 align = 8;
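/* Worked examples of the classification above for 32-bit V4 with
   hard float: "double" has size 8 and goes in FPRs (n_reg 1,
   sav_scale 8, align 8); "long long" has size 8 and goes in GPRs
   (n_reg 2, sav_scale 4, align 8); "int" has size 4 and goes in
   GPRs (n_reg 1, sav_scale 4, align 1).  */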
13351 /* Pull the value out of the saved registers.... */
13353 lab_over = NULL;
13354 addr = create_tmp_var (ptr_type_node, "addr");
13356 /* AltiVec vectors never go in registers when -mabi=altivec. */
13357 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13358 align = 16;
13359 else
13361 lab_false = create_artificial_label (input_location);
13362 lab_over = create_artificial_label (input_location);
13364 /* Long long is aligned in the registers. So is any other 2-GPR
13365 item, such as complex int, due to a historical mistake. */
13366 u = reg;
13367 if (n_reg == 2 && reg == gpr)
13369 regalign = 1;
13370 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13371 build_int_cst (TREE_TYPE (reg), n_reg - 1));
13372 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
13373 unshare_expr (reg), u);
13375 /* _Decimal128 is passed in even/odd fpr pairs; the stored
13376 reg number is 0 for f1, so we want to make it odd. */
13377 else if (reg == fpr && mode == TDmode)
13379 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13380 build_int_cst (TREE_TYPE (reg), 1));
13381 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
13384 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
13385 t = build2 (GE_EXPR, boolean_type_node, u, t);
13386 u = build1 (GOTO_EXPR, void_type_node, lab_false);
13387 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13388 gimplify_and_add (t, pre_p);
13390 t = sav;
13391 if (sav_ofs)
13392 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13394 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13395 build_int_cst (TREE_TYPE (reg), n_reg));
13396 u = fold_convert (sizetype, u);
13397 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13398 t = fold_build_pointer_plus (t, u);
13400 /* _Decimal32 varargs are located in the second word of the 64-bit
13401 FP register for 32-bit binaries. */
13402 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
13403 t = fold_build_pointer_plus_hwi (t, size);
13405 /* Args are passed right-aligned. */
13406 if (BYTES_BIG_ENDIAN)
13407 t = fold_build_pointer_plus_hwi (t, pad);
13409 gimplify_assign (addr, t, pre_p);
13411 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13413 stmt = gimple_build_label (lab_false);
13414 gimple_seq_add_stmt (pre_p, stmt);
13416 if ((n_reg == 2 && !regalign) || n_reg > 2)
13418 /* Ensure that we don't find any more args in regs.
13419 Alignment has been taken care of for the special cases. */
13420 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13424 /* ... otherwise out of the overflow area. */
13426 /* Care for on-stack alignment if needed. */
13427 t = ovf;
13428 if (align != 1)
13430 t = fold_build_pointer_plus_hwi (t, align - 1);
13431 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13432 build_int_cst (TREE_TYPE (t), -align));
13435 /* Args are passed right-aligned. */
13436 if (BYTES_BIG_ENDIAN)
13437 t = fold_build_pointer_plus_hwi (t, pad);
13439 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13441 gimplify_assign (unshare_expr (addr), t, pre_p);
13443 t = fold_build_pointer_plus_hwi (t, size);
13444 gimplify_assign (unshare_expr (ovf), t, pre_p);
13446 if (lab_over)
13448 stmt = gimple_build_label (lab_over);
13449 gimple_seq_add_stmt (pre_p, stmt);
13452 if (STRICT_ALIGNMENT
13453 && (TYPE_ALIGN (type)
13454 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13456 /* The value (of type complex double, for example) may not be
13457 aligned in memory in the saved registers, so copy via a
13458 temporary. (This is the same code as used for SPARC.) */
13459 tree tmp = create_tmp_var (type, "va_arg_tmp");
13460 tree dest_addr = build_fold_addr_expr (tmp);
13462 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13463 3, dest_addr, addr, size_int (rsize * 4));
13465 gimplify_and_add (copy, pre_p);
13466 addr = dest_addr;
13469 addr = fold_convert (ptrtype, addr);
13470 return build_va_arg_indirect_ref (addr);
13473 /* Builtins. */
13475 static void
13476 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13478 tree t;
13479 unsigned classify = rs6000_builtin_info[(int)code].attr;
13480 const char *attr_string = "";
13482 gcc_assert (name != NULL);
13483 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13485 if (rs6000_builtin_decls[(int)code])
13486 fatal_error (input_location,
13487 "internal error: builtin function %qs already processed",
13488 name);
13490 rs6000_builtin_decls[(int)code] = t =
13491 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13493 /* Set any special attributes. */
13494 if ((classify & RS6000_BTC_CONST) != 0)
13496 /* const function, function only depends on the inputs. */
13497 TREE_READONLY (t) = 1;
13498 TREE_NOTHROW (t) = 1;
13499 attr_string = ", const";
13501 else if ((classify & RS6000_BTC_PURE) != 0)
13503 /* pure function, function can read global memory, but does not set any
13504 external state. */
13505 DECL_PURE_P (t) = 1;
13506 TREE_NOTHROW (t) = 1;
13507 attr_string = ", pure";
13509 else if ((classify & RS6000_BTC_FP) != 0)
13511 /* Function is a math function. If rounding mode is on, then treat the
13512 function as not reading global memory, but it can have arbitrary side
13513 effects. If it is off, then assume the function is a const function.
13514 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13515 builtin-attribute.def that is used for the math functions. */
13516 TREE_NOTHROW (t) = 1;
13517 if (flag_rounding_math)
13519 DECL_PURE_P (t) = 1;
13520 DECL_IS_NOVOPS (t) = 1;
13521 attr_string = ", fp, pure";
13523 else
13525 TREE_READONLY (t) = 1;
13526 attr_string = ", fp, const";
13529 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13530 gcc_unreachable ();
13532 if (TARGET_DEBUG_BUILTIN)
13533 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13534 (int)code, name, attr_string);
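/* A hypothetical call, for illustration only (the real calls are made
   from the various *_init_builtins routines, with function types built
   there; the type variable name below is assumed):

     def_builtin ("__builtin_altivec_vaddubm", v16qi_ftype_v16qi_v16qi,
                  ALTIVEC_BUILTIN_VADDUBM);

   This registers the builtin, records its decl in rs6000_builtin_decls,
   and applies const/pure/fp attributes from rs6000_builtin_info.  */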
13537 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13539 #undef RS6000_BUILTIN_0
13540 #undef RS6000_BUILTIN_1
13541 #undef RS6000_BUILTIN_2
13542 #undef RS6000_BUILTIN_3
13543 #undef RS6000_BUILTIN_A
13544 #undef RS6000_BUILTIN_D
13545 #undef RS6000_BUILTIN_H
13546 #undef RS6000_BUILTIN_P
13547 #undef RS6000_BUILTIN_Q
13548 #undef RS6000_BUILTIN_X
13550 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13551 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13552 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13553 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13554 { MASK, ICODE, NAME, ENUM },
13556 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13557 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13558 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13559 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13560 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13561 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13563 static const struct builtin_description bdesc_3arg[] =
13565 #include "rs6000-builtin.def"
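/* The #define/#include pattern above is an X-macro: rs6000-builtin.def
   contains one RS6000_BUILTIN_<kind> entry per builtin, and each table
   redefines exactly one kind to expand to an initializer while the other
   kinds expand to nothing.  A .def entry has this shape (hypothetical
   example of an expanded entry):

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP, "__builtin_altivec_vmaddfp",
                       RS6000_BTM_ALTIVEC, RS6000_BTC_FP, CODE_FOR_fmav4sf4)

   Re-including the same .def file below therefore yields bdesc_dst,
   bdesc_2arg, and so on, each holding only entries of its own kind.  */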
13568 /* DST operations: void foo (void *, const int, const char). */
13570 #undef RS6000_BUILTIN_0
13571 #undef RS6000_BUILTIN_1
13572 #undef RS6000_BUILTIN_2
13573 #undef RS6000_BUILTIN_3
13574 #undef RS6000_BUILTIN_A
13575 #undef RS6000_BUILTIN_D
13576 #undef RS6000_BUILTIN_H
13577 #undef RS6000_BUILTIN_P
13578 #undef RS6000_BUILTIN_Q
13579 #undef RS6000_BUILTIN_X
13581 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13582 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13583 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13584 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13585 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13586 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13587 { MASK, ICODE, NAME, ENUM },
13589 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13590 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13591 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13592 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13594 static const struct builtin_description bdesc_dst[] =
13596 #include "rs6000-builtin.def"
13599 /* Simple binary operations: VECc = foo (VECa, VECb). */
13601 #undef RS6000_BUILTIN_0
13602 #undef RS6000_BUILTIN_1
13603 #undef RS6000_BUILTIN_2
13604 #undef RS6000_BUILTIN_3
13605 #undef RS6000_BUILTIN_A
13606 #undef RS6000_BUILTIN_D
13607 #undef RS6000_BUILTIN_H
13608 #undef RS6000_BUILTIN_P
13609 #undef RS6000_BUILTIN_Q
13610 #undef RS6000_BUILTIN_X
13612 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13613 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13614 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13615 { MASK, ICODE, NAME, ENUM },
13617 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13618 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13619 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13620 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13621 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13622 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13623 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13625 static const struct builtin_description bdesc_2arg[] =
13627 #include "rs6000-builtin.def"
13630 #undef RS6000_BUILTIN_0
13631 #undef RS6000_BUILTIN_1
13632 #undef RS6000_BUILTIN_2
13633 #undef RS6000_BUILTIN_3
13634 #undef RS6000_BUILTIN_A
13635 #undef RS6000_BUILTIN_D
13636 #undef RS6000_BUILTIN_H
13637 #undef RS6000_BUILTIN_P
13638 #undef RS6000_BUILTIN_Q
13639 #undef RS6000_BUILTIN_X
13641 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13642 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13643 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13644 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13645 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13646 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13647 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13648 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13649 { MASK, ICODE, NAME, ENUM },
13651 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13652 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13654 /* AltiVec predicates. */
13656 static const struct builtin_description bdesc_altivec_preds[] =
13658 #include "rs6000-builtin.def"
13661 /* PAIRED predicates. */
13662 #undef RS6000_BUILTIN_0
13663 #undef RS6000_BUILTIN_1
13664 #undef RS6000_BUILTIN_2
13665 #undef RS6000_BUILTIN_3
13666 #undef RS6000_BUILTIN_A
13667 #undef RS6000_BUILTIN_D
13668 #undef RS6000_BUILTIN_H
13669 #undef RS6000_BUILTIN_P
13670 #undef RS6000_BUILTIN_Q
13671 #undef RS6000_BUILTIN_X
13673 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13674 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13675 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13676 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13677 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13678 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13679 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13680 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13681 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13682 { MASK, ICODE, NAME, ENUM },
13684 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13686 static const struct builtin_description bdesc_paired_preds[] =
13688 #include "rs6000-builtin.def"
13691 /* ABS* operations. */
13693 #undef RS6000_BUILTIN_0
13694 #undef RS6000_BUILTIN_1
13695 #undef RS6000_BUILTIN_2
13696 #undef RS6000_BUILTIN_3
13697 #undef RS6000_BUILTIN_A
13698 #undef RS6000_BUILTIN_D
13699 #undef RS6000_BUILTIN_H
13700 #undef RS6000_BUILTIN_P
13701 #undef RS6000_BUILTIN_Q
13702 #undef RS6000_BUILTIN_X
13704 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13705 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13706 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13707 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13708 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13709 { MASK, ICODE, NAME, ENUM },
13711 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13712 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13713 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13714 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13715 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13717 static const struct builtin_description bdesc_abs[] =
13719 #include "rs6000-builtin.def"
13722 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13723 foo (VECa). */
13725 #undef RS6000_BUILTIN_0
13726 #undef RS6000_BUILTIN_1
13727 #undef RS6000_BUILTIN_2
13728 #undef RS6000_BUILTIN_3
13729 #undef RS6000_BUILTIN_A
13730 #undef RS6000_BUILTIN_D
13731 #undef RS6000_BUILTIN_H
13732 #undef RS6000_BUILTIN_P
13733 #undef RS6000_BUILTIN_Q
13734 #undef RS6000_BUILTIN_X
13736 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13737 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13738 { MASK, ICODE, NAME, ENUM },
13740 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13741 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13742 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13743 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13744 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13745 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13746 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13747 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13749 static const struct builtin_description bdesc_1arg[] =
13751 #include "rs6000-builtin.def"
13754 /* Simple no-argument operations: result = __builtin_darn_32 () */
13756 #undef RS6000_BUILTIN_0
13757 #undef RS6000_BUILTIN_1
13758 #undef RS6000_BUILTIN_2
13759 #undef RS6000_BUILTIN_3
13760 #undef RS6000_BUILTIN_A
13761 #undef RS6000_BUILTIN_D
13762 #undef RS6000_BUILTIN_H
13763 #undef RS6000_BUILTIN_P
13764 #undef RS6000_BUILTIN_Q
13765 #undef RS6000_BUILTIN_X
13767 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13768 { MASK, ICODE, NAME, ENUM },
13770 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13771 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13772 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13773 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13774 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13775 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13776 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13777 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13778 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13780 static const struct builtin_description bdesc_0arg[] =
13782 #include "rs6000-builtin.def"
13785 /* HTM builtins. */
13786 #undef RS6000_BUILTIN_0
13787 #undef RS6000_BUILTIN_1
13788 #undef RS6000_BUILTIN_2
13789 #undef RS6000_BUILTIN_3
13790 #undef RS6000_BUILTIN_A
13791 #undef RS6000_BUILTIN_D
13792 #undef RS6000_BUILTIN_H
13793 #undef RS6000_BUILTIN_P
13794 #undef RS6000_BUILTIN_Q
13795 #undef RS6000_BUILTIN_X
13797 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13798 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13799 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13800 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13801 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13802 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13803 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13804 { MASK, ICODE, NAME, ENUM },
13806 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13807 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13808 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13810 static const struct builtin_description bdesc_htm[] =
13812 #include "rs6000-builtin.def"
13815 #undef RS6000_BUILTIN_0
13816 #undef RS6000_BUILTIN_1
13817 #undef RS6000_BUILTIN_2
13818 #undef RS6000_BUILTIN_3
13819 #undef RS6000_BUILTIN_A
13820 #undef RS6000_BUILTIN_D
13821 #undef RS6000_BUILTIN_H
13822 #undef RS6000_BUILTIN_P
13823 #undef RS6000_BUILTIN_Q
13825 /* Return true if a builtin function is overloaded. */
13826 bool
13827 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13829 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13832 const char *
13833 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13835 return rs6000_builtin_info[(int)fncode].name;
13838 /* Expand an expression EXP that calls a builtin without arguments. */
13839 static rtx
13840 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13842 rtx pat;
13843 machine_mode tmode = insn_data[icode].operand[0].mode;
13845 if (icode == CODE_FOR_nothing)
13846 /* Builtin not supported on this processor. */
13847 return 0;
13849 if (target == 0
13850 || GET_MODE (target) != tmode
13851 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13852 target = gen_reg_rtx (tmode);
13854 pat = GEN_FCN (icode) (target);
13855 if (! pat)
13856 return 0;
13857 emit_insn (pat);
13859 return target;
13863 static rtx
13864 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13866 rtx pat;
13867 tree arg0 = CALL_EXPR_ARG (exp, 0);
13868 tree arg1 = CALL_EXPR_ARG (exp, 1);
13869 rtx op0 = expand_normal (arg0);
13870 rtx op1 = expand_normal (arg1);
13871 machine_mode mode0 = insn_data[icode].operand[0].mode;
13872 machine_mode mode1 = insn_data[icode].operand[1].mode;
13874 if (icode == CODE_FOR_nothing)
13875 /* Builtin not supported on this processor. */
13876 return 0;
13878 /* If we got invalid arguments bail out before generating bad rtl. */
13879 if (arg0 == error_mark_node || arg1 == error_mark_node)
13880 return const0_rtx;
13882 if (GET_CODE (op0) != CONST_INT
13883 || INTVAL (op0) > 255
13884 || INTVAL (op0) < 0)
13886 error ("argument 1 must be an 8-bit field value");
13887 return const0_rtx;
13890 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13891 op0 = copy_to_mode_reg (mode0, op0);
13893 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13894 op1 = copy_to_mode_reg (mode1, op1);
13896 pat = GEN_FCN (icode) (op0, op1);
13897 if (! pat)
13898 return const0_rtx;
13899 emit_insn (pat);
13901 return NULL_RTX;
13904 static rtx
13905 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13907 rtx pat;
13908 tree arg0 = CALL_EXPR_ARG (exp, 0);
13909 rtx op0 = expand_normal (arg0);
13910 machine_mode tmode = insn_data[icode].operand[0].mode;
13911 machine_mode mode0 = insn_data[icode].operand[1].mode;
13913 if (icode == CODE_FOR_nothing)
13914 /* Builtin not supported on this processor. */
13915 return 0;
13917 /* If we got invalid arguments bail out before generating bad rtl. */
13918 if (arg0 == error_mark_node)
13919 return const0_rtx;
13921 if (icode == CODE_FOR_altivec_vspltisb
13922 || icode == CODE_FOR_altivec_vspltish
13923 || icode == CODE_FOR_altivec_vspltisw)
13925 /* Only allow 5-bit *signed* literals. */
13926 if (GET_CODE (op0) != CONST_INT
13927 || INTVAL (op0) > 15
13928 || INTVAL (op0) < -16)
13930 error ("argument 1 must be a 5-bit signed literal");
13931 return CONST0_RTX (tmode);
13935 if (target == 0
13936 || GET_MODE (target) != tmode
13937 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13938 target = gen_reg_rtx (tmode);
13940 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13941 op0 = copy_to_mode_reg (mode0, op0);
13943 pat = GEN_FCN (icode) (target, op0);
13944 if (! pat)
13945 return 0;
13946 emit_insn (pat);
13948 return target;
13951 static rtx
13952 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13954 rtx pat, scratch1, scratch2;
13955 tree arg0 = CALL_EXPR_ARG (exp, 0);
13956 rtx op0 = expand_normal (arg0);
13957 machine_mode tmode = insn_data[icode].operand[0].mode;
13958 machine_mode mode0 = insn_data[icode].operand[1].mode;
13960 /* If we have invalid arguments, bail out before generating bad rtl. */
13961 if (arg0 == error_mark_node)
13962 return const0_rtx;
13964 if (target == 0
13965 || GET_MODE (target) != tmode
13966 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13967 target = gen_reg_rtx (tmode);
13969 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13970 op0 = copy_to_mode_reg (mode0, op0);
13972 scratch1 = gen_reg_rtx (mode0);
13973 scratch2 = gen_reg_rtx (mode0);
13975 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13976 if (! pat)
13977 return 0;
13978 emit_insn (pat);
13980 return target;
13983 static rtx
13984 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13986 rtx pat;
13987 tree arg0 = CALL_EXPR_ARG (exp, 0);
13988 tree arg1 = CALL_EXPR_ARG (exp, 1);
13989 rtx op0 = expand_normal (arg0);
13990 rtx op1 = expand_normal (arg1);
13991 machine_mode tmode = insn_data[icode].operand[0].mode;
13992 machine_mode mode0 = insn_data[icode].operand[1].mode;
13993 machine_mode mode1 = insn_data[icode].operand[2].mode;
13995 if (icode == CODE_FOR_nothing)
13996 /* Builtin not supported on this processor. */
13997 return 0;
13999 /* If we got invalid arguments bail out before generating bad rtl. */
14000 if (arg0 == error_mark_node || arg1 == error_mark_node)
14001 return const0_rtx;
14003 if (icode == CODE_FOR_altivec_vcfux
14004 || icode == CODE_FOR_altivec_vcfsx
14005 || icode == CODE_FOR_altivec_vctsxs
14006 || icode == CODE_FOR_altivec_vctuxs
14007 || icode == CODE_FOR_altivec_vspltb
14008 || icode == CODE_FOR_altivec_vsplth
14009 || icode == CODE_FOR_altivec_vspltw)
14011 /* Only allow 5-bit unsigned literals. */
14012 STRIP_NOPS (arg1);
14013 if (TREE_CODE (arg1) != INTEGER_CST
14014 || TREE_INT_CST_LOW (arg1) & ~0x1f)
14016 error ("argument 2 must be a 5-bit unsigned literal");
14017 return CONST0_RTX (tmode);
14020 else if (icode == CODE_FOR_dfptstsfi_eq_dd
14021 || icode == CODE_FOR_dfptstsfi_lt_dd
14022 || icode == CODE_FOR_dfptstsfi_gt_dd
14023 || icode == CODE_FOR_dfptstsfi_unordered_dd
14024 || icode == CODE_FOR_dfptstsfi_eq_td
14025 || icode == CODE_FOR_dfptstsfi_lt_td
14026 || icode == CODE_FOR_dfptstsfi_gt_td
14027 || icode == CODE_FOR_dfptstsfi_unordered_td)
14029 /* Only allow 6-bit unsigned literals. */
14030 STRIP_NOPS (arg0);
14031 if (TREE_CODE (arg0) != INTEGER_CST
14032 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
14034 error ("argument 1 must be a 6-bit unsigned literal");
14035 return CONST0_RTX (tmode);
14038 else if (icode == CODE_FOR_xststdcqp
14039 || icode == CODE_FOR_xststdcdp
14040 || icode == CODE_FOR_xststdcsp
14041 || icode == CODE_FOR_xvtstdcdp
14042 || icode == CODE_FOR_xvtstdcsp)
14044 /* Only allow 7-bit unsigned literals. */
14045 STRIP_NOPS (arg1);
14046 if (TREE_CODE (arg1) != INTEGER_CST
14047 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
14049 error ("argument 2 must be a 7-bit unsigned literal");
14050 return CONST0_RTX (tmode);
14053 else if (icode == CODE_FOR_unpackv1ti
14054 || icode == CODE_FOR_unpackkf
14055 || icode == CODE_FOR_unpacktf
14056 || icode == CODE_FOR_unpackif
14057 || icode == CODE_FOR_unpacktd)
14059 /* Only allow 1-bit unsigned literals. */
14060 STRIP_NOPS (arg1);
14061 if (TREE_CODE (arg1) != INTEGER_CST
14062 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
14064 error ("argument 2 must be a 1-bit unsigned literal");
14065 return CONST0_RTX (tmode);
14069 if (target == 0
14070 || GET_MODE (target) != tmode
14071 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14072 target = gen_reg_rtx (tmode);
14074 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14075 op0 = copy_to_mode_reg (mode0, op0);
14076 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14077 op1 = copy_to_mode_reg (mode1, op1);
14079 pat = GEN_FCN (icode) (target, op0, op1);
14080 if (! pat)
14081 return 0;
14082 emit_insn (pat);
14084 return target;
14087 static rtx
14088 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
14090 rtx pat, scratch;
14091 tree cr6_form = CALL_EXPR_ARG (exp, 0);
14092 tree arg0 = CALL_EXPR_ARG (exp, 1);
14093 tree arg1 = CALL_EXPR_ARG (exp, 2);
14094 rtx op0 = expand_normal (arg0);
14095 rtx op1 = expand_normal (arg1);
14096 machine_mode tmode = SImode;
14097 machine_mode mode0 = insn_data[icode].operand[1].mode;
14098 machine_mode mode1 = insn_data[icode].operand[2].mode;
14099 int cr6_form_int;
14101 if (TREE_CODE (cr6_form) != INTEGER_CST)
14103 error ("argument 1 of %qs must be a constant",
14104 "__builtin_altivec_predicate");
14105 return const0_rtx;
14107 else
14108 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
14110 gcc_assert (mode0 == mode1);
14112 /* If we have invalid arguments, bail out before generating bad rtl. */
14113 if (arg0 == error_mark_node || arg1 == error_mark_node)
14114 return const0_rtx;
14116 if (target == 0
14117 || GET_MODE (target) != tmode
14118 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14119 target = gen_reg_rtx (tmode);
14121 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14122 op0 = copy_to_mode_reg (mode0, op0);
14123 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14124 op1 = copy_to_mode_reg (mode1, op1);
14126 /* Note that for many of the relevant operations (e.g. cmpne or
14127 cmpeq) with float or double operands, it makes more sense for the
14128 mode of the allocated scratch register to select a vector of
14129 integer. But the choice to copy the mode of operand 0 was made
14130 long ago and there are no plans to change it. */
14131 scratch = gen_reg_rtx (mode0);
14133 pat = GEN_FCN (icode) (scratch, op0, op1);
14134 if (! pat)
14135 return 0;
14136 emit_insn (pat);
14138 /* The vec_any* and vec_all* predicates use the same opcodes for two
14139 different operations, but the bits in CR6 will be different
14140 depending on what information we want. So we have to play tricks
14141 with CR6 to get the right bits out.
14143 If you think this is disgusting, look at the specs for the
14144 AltiVec predicates. */
14146 switch (cr6_form_int)
14148 case 0:
14149 emit_insn (gen_cr6_test_for_zero (target));
14150 break;
14151 case 1:
14152 emit_insn (gen_cr6_test_for_zero_reverse (target));
14153 break;
14154 case 2:
14155 emit_insn (gen_cr6_test_for_lt (target));
14156 break;
14157 case 3:
14158 emit_insn (gen_cr6_test_for_lt_reverse (target));
14159 break;
14160 default:
14161 error ("argument 1 of %qs is out of range",
14162 "__builtin_altivec_predicate");
14163 break;
14166 return target;
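/* For instance, the vec_all_* and vec_any_* macros in altivec.h pass
   one of the four literal values handled above as their first argument,
   selecting which CR6 bit and polarity the predicate tests.  */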
14169 static rtx
14170 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
14172 rtx pat, addr;
14173 tree arg0 = CALL_EXPR_ARG (exp, 0);
14174 tree arg1 = CALL_EXPR_ARG (exp, 1);
14175 machine_mode tmode = insn_data[icode].operand[0].mode;
14176 machine_mode mode0 = Pmode;
14177 machine_mode mode1 = Pmode;
14178 rtx op0 = expand_normal (arg0);
14179 rtx op1 = expand_normal (arg1);
14181 if (icode == CODE_FOR_nothing)
14182 /* Builtin not supported on this processor. */
14183 return 0;
14185 /* If we got invalid arguments bail out before generating bad rtl. */
14186 if (arg0 == error_mark_node || arg1 == error_mark_node)
14187 return const0_rtx;
14189 if (target == 0
14190 || GET_MODE (target) != tmode
14191 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14192 target = gen_reg_rtx (tmode);
14194 op1 = copy_to_mode_reg (mode1, op1);
14196 if (op0 == const0_rtx)
14198 addr = gen_rtx_MEM (tmode, op1);
14200 else
14202 op0 = copy_to_mode_reg (mode0, op0);
14203 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
14206 pat = GEN_FCN (icode) (target, addr);
14208 if (! pat)
14209 return 0;
14210 emit_insn (pat);
14212 return target;
14215 /* Return a constant vector for use as a little-endian permute control vector
14216 to reverse the order of elements of the given vector mode. */
14217 static rtx
14218 swap_selector_for_mode (machine_mode mode)
14220 /* These are little endian vectors, so their elements are reversed
14221 from what you would normally expect for a permute control vector. */
14222 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14223 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14224 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14225 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
14226 unsigned int *swaparray, i;
14227 rtx perm[16];
14229 switch (mode)
14231 case E_V2DFmode:
14232 case E_V2DImode:
14233 swaparray = swap2;
14234 break;
14235 case E_V4SFmode:
14236 case E_V4SImode:
14237 swaparray = swap4;
14238 break;
14239 case E_V8HImode:
14240 swaparray = swap8;
14241 break;
14242 case E_V16QImode:
14243 swaparray = swap16;
14244 break;
14245 default:
14246 gcc_unreachable ();
14249 for (i = 0; i < 16; ++i)
14250 perm[i] = GEN_INT (swaparray[i]);
14252 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
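/* For example, for V4SImode the constant built above holds the element
   values {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}; because these are
   the elements of a little-endian V16QI, the resulting vperm control
   swaps the four 32-bit elements end-for-end while keeping the bytes
   within each element intact.  */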
14255 /* Generate code for an "lvxl", or "lve*x" built-in for a little endian target
14256 with -maltivec=be specified. Issue the load followed by an element-
14257 reversing permute. */
14258 void
14259 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14261 rtx tmp = gen_reg_rtx (mode);
14262 rtx load = gen_rtx_SET (tmp, op1);
14263 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14264 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
14265 rtx sel = swap_selector_for_mode (mode);
14266 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
14268 gcc_assert (REG_P (op0));
14269 emit_insn (par);
14270 emit_insn (gen_rtx_SET (op0, vperm));
14273 /* Generate code for a "stvxl" built-in for a little endian target with
14274 -maltivec=be specified. Issue the store preceded by an element-reversing
14275 permute. */
14276 void
14277 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14279 rtx tmp = gen_reg_rtx (mode);
14280 rtx store = gen_rtx_SET (op0, tmp);
14281 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14282 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
14283 rtx sel = swap_selector_for_mode (mode);
14284 rtx vperm;
14286 gcc_assert (REG_P (op1));
14287 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14288 emit_insn (gen_rtx_SET (tmp, vperm));
14289 emit_insn (par);
14292 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
14293 specified. Issue the store preceded by an element-reversing permute. */
14294 void
14295 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14297 machine_mode inner_mode = GET_MODE_INNER (mode);
14298 rtx tmp = gen_reg_rtx (mode);
14299 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
14300 rtx sel = swap_selector_for_mode (mode);
14301 rtx vperm;
14303 gcc_assert (REG_P (op1));
14304 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14305 emit_insn (gen_rtx_SET (tmp, vperm));
14306 emit_insn (gen_rtx_SET (op0, stvx));
14309 static rtx
14310 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14312 rtx pat, addr;
14313 tree arg0 = CALL_EXPR_ARG (exp, 0);
14314 tree arg1 = CALL_EXPR_ARG (exp, 1);
14315 machine_mode tmode = insn_data[icode].operand[0].mode;
14316 machine_mode mode0 = Pmode;
14317 machine_mode mode1 = Pmode;
14318 rtx op0 = expand_normal (arg0);
14319 rtx op1 = expand_normal (arg1);
14321 if (icode == CODE_FOR_nothing)
14322 /* Builtin not supported on this processor. */
14323 return 0;
14325 /* If we got invalid arguments bail out before generating bad rtl. */
14326 if (arg0 == error_mark_node || arg1 == error_mark_node)
14327 return const0_rtx;
14329 if (target == 0
14330 || GET_MODE (target) != tmode
14331 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14332 target = gen_reg_rtx (tmode);
14334 op1 = copy_to_mode_reg (mode1, op1);
14336 /* For LVX, express the RTL accurately by ANDing the address with -16.
14337 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14338 so the raw address is fine. */
14339 if (icode == CODE_FOR_altivec_lvx_v2df_2op
14340 || icode == CODE_FOR_altivec_lvx_v2di_2op
14341 || icode == CODE_FOR_altivec_lvx_v4sf_2op
14342 || icode == CODE_FOR_altivec_lvx_v4si_2op
14343 || icode == CODE_FOR_altivec_lvx_v8hi_2op
14344 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
14346 rtx rawaddr;
14347 if (op0 == const0_rtx)
14348 rawaddr = op1;
14349 else
14351 op0 = copy_to_mode_reg (mode0, op0);
14352 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
14354 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14355 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
14357 /* For -maltivec=be, emit the load and follow it up with a
14358 permute to swap the elements. */
14359 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14361 rtx temp = gen_reg_rtx (tmode);
14362 emit_insn (gen_rtx_SET (temp, addr));
14364 rtx sel = swap_selector_for_mode (tmode);
14365 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
14366 UNSPEC_VPERM);
14367 emit_insn (gen_rtx_SET (target, vperm));
14369 else
14370 emit_insn (gen_rtx_SET (target, addr));
14372 else
14374 if (op0 == const0_rtx)
14375 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14376 else
14378 op0 = copy_to_mode_reg (mode0, op0);
14379 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14380 gen_rtx_PLUS (Pmode, op1, op0));
14383 pat = GEN_FCN (icode) (target, addr);
14384 if (! pat)
14385 return 0;
14386 emit_insn (pat);
14389 return target;
14392 static rtx
14393 altivec_expand_xl_be_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14395 rtx pat, addr;
14396 tree arg0 = CALL_EXPR_ARG (exp, 0);
14397 tree arg1 = CALL_EXPR_ARG (exp, 1);
14398 machine_mode tmode = insn_data[icode].operand[0].mode;
14399 machine_mode mode0 = Pmode;
14400 machine_mode mode1 = Pmode;
14401 rtx op0 = expand_normal (arg0);
14402 rtx op1 = expand_normal (arg1);
14404 if (icode == CODE_FOR_nothing)
14405 /* Builtin not supported on this processor. */
14406 return 0;
14408 /* If we got invalid arguments bail out before generating bad rtl. */
14409 if (arg0 == error_mark_node || arg1 == error_mark_node)
14410 return const0_rtx;
14412 if (target == 0
14413 || GET_MODE (target) != tmode
14414 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14415 target = gen_reg_rtx (tmode);
14417 op1 = copy_to_mode_reg (mode1, op1);
14419 if (op0 == const0_rtx)
14420 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14421 else
14423 op0 = copy_to_mode_reg (mode0, op0);
14424 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14425 gen_rtx_PLUS (Pmode, op1, op0));
14428 pat = GEN_FCN (icode) (target, addr);
14429 if (!pat)
14430 return 0;
14432 emit_insn (pat);
14433 /* Reverse the order of the elements if in LE mode. */
14434 if (!VECTOR_ELT_ORDER_BIG)
14436 rtx sel = swap_selector_for_mode (tmode);
14437 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, target, target, sel),
14438 UNSPEC_VPERM);
14439 emit_insn (gen_rtx_SET (target, vperm));
14441 return target;
14444 static rtx
14445 paired_expand_stv_builtin (enum insn_code icode, tree exp)
14447 tree arg0 = CALL_EXPR_ARG (exp, 0);
14448 tree arg1 = CALL_EXPR_ARG (exp, 1);
14449 tree arg2 = CALL_EXPR_ARG (exp, 2);
14450 rtx op0 = expand_normal (arg0);
14451 rtx op1 = expand_normal (arg1);
14452 rtx op2 = expand_normal (arg2);
14453 rtx pat, addr;
14454 machine_mode tmode = insn_data[icode].operand[0].mode;
14455 machine_mode mode1 = Pmode;
14456 machine_mode mode2 = Pmode;
14458 /* Invalid arguments. Bail before doing anything stoopid! */
14459 if (arg0 == error_mark_node
14460 || arg1 == error_mark_node
14461 || arg2 == error_mark_node)
14462 return const0_rtx;
14464 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
14465 op0 = copy_to_mode_reg (tmode, op0);
14467 op2 = copy_to_mode_reg (mode2, op2);
14469 if (op1 == const0_rtx)
14471 addr = gen_rtx_MEM (tmode, op2);
14473 else
14475 op1 = copy_to_mode_reg (mode1, op1);
14476 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
14479 pat = GEN_FCN (icode) (addr, op0);
14480 if (pat)
14481 emit_insn (pat);
14482 return NULL_RTX;
14485 static rtx
14486 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
14488 rtx pat;
14489 tree arg0 = CALL_EXPR_ARG (exp, 0);
14490 tree arg1 = CALL_EXPR_ARG (exp, 1);
14491 tree arg2 = CALL_EXPR_ARG (exp, 2);
14492 rtx op0 = expand_normal (arg0);
14493 rtx op1 = expand_normal (arg1);
14494 rtx op2 = expand_normal (arg2);
14495 machine_mode mode0 = insn_data[icode].operand[0].mode;
14496 machine_mode mode1 = insn_data[icode].operand[1].mode;
14497 machine_mode mode2 = insn_data[icode].operand[2].mode;
14499 if (icode == CODE_FOR_nothing)
14500 /* Builtin not supported on this processor. */
14501 return NULL_RTX;
14503 /* If we got invalid arguments bail out before generating bad rtl. */
14504 if (arg0 == error_mark_node
14505 || arg1 == error_mark_node
14506 || arg2 == error_mark_node)
14507 return NULL_RTX;
14509 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14510 op0 = copy_to_mode_reg (mode0, op0);
14511 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14512 op1 = copy_to_mode_reg (mode1, op1);
14513 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14514 op2 = copy_to_mode_reg (mode2, op2);
14516 pat = GEN_FCN (icode) (op0, op1, op2);
14517 if (pat)
14518 emit_insn (pat);
14520 return NULL_RTX;
14523 static rtx
14524 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14526 tree arg0 = CALL_EXPR_ARG (exp, 0);
14527 tree arg1 = CALL_EXPR_ARG (exp, 1);
14528 tree arg2 = CALL_EXPR_ARG (exp, 2);
14529 rtx op0 = expand_normal (arg0);
14530 rtx op1 = expand_normal (arg1);
14531 rtx op2 = expand_normal (arg2);
14532 rtx pat, addr, rawaddr;
14533 machine_mode tmode = insn_data[icode].operand[0].mode;
14534 machine_mode smode = insn_data[icode].operand[1].mode;
14535 machine_mode mode1 = Pmode;
14536 machine_mode mode2 = Pmode;
14538 /* Invalid arguments.  Bail out before generating bad rtl.  */
14539 if (arg0 == error_mark_node
14540 || arg1 == error_mark_node
14541 || arg2 == error_mark_node)
14542 return const0_rtx;
14544 op2 = copy_to_mode_reg (mode2, op2);
14546 /* For STVX, express the RTL accurately by ANDing the address with -16.
14547 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14548 so the raw address is fine. */
14549 if (icode == CODE_FOR_altivec_stvx_v2df_2op
14550 || icode == CODE_FOR_altivec_stvx_v2di_2op
14551 || icode == CODE_FOR_altivec_stvx_v4sf_2op
14552 || icode == CODE_FOR_altivec_stvx_v4si_2op
14553 || icode == CODE_FOR_altivec_stvx_v8hi_2op
14554 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
14556 if (op1 == const0_rtx)
14557 rawaddr = op2;
14558 else
14560 op1 = copy_to_mode_reg (mode1, op1);
14561 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14564 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14565 addr = gen_rtx_MEM (tmode, addr);
14567 op0 = copy_to_mode_reg (tmode, op0);
14569 /* For -maltivec=be, emit a permute to swap the elements, followed
14570 by the store. */
14571 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14573 rtx temp = gen_reg_rtx (tmode);
14574 rtx sel = swap_selector_for_mode (tmode);
14575 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
14576 UNSPEC_VPERM);
14577 emit_insn (gen_rtx_SET (temp, vperm));
14578 emit_insn (gen_rtx_SET (addr, temp));
14580 else
14581 emit_insn (gen_rtx_SET (addr, op0));
14583 else
14585 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14586 op0 = copy_to_mode_reg (smode, op0);
14588 if (op1 == const0_rtx)
14589 addr = gen_rtx_MEM (tmode, op2);
14590 else
14592 op1 = copy_to_mode_reg (mode1, op1);
14593 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14596 pat = GEN_FCN (icode) (addr, op0);
14597 if (pat)
14598 emit_insn (pat);
14601 return NULL_RTX;
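/* Illustrative example: the AND with -16 above models stvx's
   architectural behavior of ignoring the low four address bits, so for

     vec_st (v, 0, p);

   the RTL store address is (p & -16) even when P is not 16-byte
   aligned, matching what the hardware actually does.  */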
14604 /* Return the appropriate SPR number associated with the given builtin. */
14605 static inline HOST_WIDE_INT
14606 htm_spr_num (enum rs6000_builtins code)
14608 if (code == HTM_BUILTIN_GET_TFHAR
14609 || code == HTM_BUILTIN_SET_TFHAR)
14610 return TFHAR_SPR;
14611 else if (code == HTM_BUILTIN_GET_TFIAR
14612 || code == HTM_BUILTIN_SET_TFIAR)
14613 return TFIAR_SPR;
14614 else if (code == HTM_BUILTIN_GET_TEXASR
14615 || code == HTM_BUILTIN_SET_TEXASR)
14616 return TEXASR_SPR;
14617 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14618 || code == HTM_BUILTIN_SET_TEXASRU);
14619 return TEXASRU_SPR;
14622 /* Return the appropriate SPR regno associated with the given builtin. */
14623 static inline HOST_WIDE_INT
14624 htm_spr_regno (enum rs6000_builtins code)
14626 if (code == HTM_BUILTIN_GET_TFHAR
14627 || code == HTM_BUILTIN_SET_TFHAR)
14628 return TFHAR_REGNO;
14629 else if (code == HTM_BUILTIN_GET_TFIAR
14630 || code == HTM_BUILTIN_SET_TFIAR)
14631 return TFIAR_REGNO;
14632 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14633 || code == HTM_BUILTIN_SET_TEXASR
14634 || code == HTM_BUILTIN_GET_TEXASRU
14635 || code == HTM_BUILTIN_SET_TEXASRU);
14636 return TEXASR_REGNO;
14639 /* Return the correct ICODE value depending on whether we are
14640 setting or reading the HTM SPRs. */
14641 static inline enum insn_code
14642 rs6000_htm_spr_icode (bool nonvoid)
14644 if (nonvoid)
14645 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14646 else
14647 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14650 /* Expand the HTM builtin in EXP and store the result in TARGET.
14651 Store true in *EXPANDEDP if we found a builtin to expand. */
14652 static rtx
14653 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14655 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14656 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14657 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14658 const struct builtin_description *d;
14659 size_t i;
14661 *expandedp = true;
14663 if (!TARGET_POWERPC64
14664 && (fcode == HTM_BUILTIN_TABORTDC
14665 || fcode == HTM_BUILTIN_TABORTDCI))
14667 size_t uns_fcode = (size_t)fcode;
14668 const char *name = rs6000_builtin_info[uns_fcode].name;
14669 error ("builtin %qs is only valid in 64-bit mode", name);
14670 return const0_rtx;
14673 /* Expand the HTM builtins. */
14674 d = bdesc_htm;
14675 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14676 if (d->code == fcode)
14678 rtx op[MAX_HTM_OPERANDS], pat;
14679 int nopnds = 0;
14680 tree arg;
14681 call_expr_arg_iterator iter;
14682 unsigned attr = rs6000_builtin_info[fcode].attr;
14683 enum insn_code icode = d->icode;
14684 const struct insn_operand_data *insn_op;
14685 bool uses_spr = (attr & RS6000_BTC_SPR);
14686 rtx cr = NULL_RTX;
14688 if (uses_spr)
14689 icode = rs6000_htm_spr_icode (nonvoid);
14690 insn_op = &insn_data[icode].operand[0];
14692 if (nonvoid)
14694 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14695 if (!target
14696 || GET_MODE (target) != tmode
14697 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14698 target = gen_reg_rtx (tmode);
14699 if (uses_spr)
14700 op[nopnds++] = target;
14703 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14705 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14706 return const0_rtx;
14708 insn_op = &insn_data[icode].operand[nopnds];
14710 op[nopnds] = expand_normal (arg);
14712 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14714 if (!strcmp (insn_op->constraint, "n"))
14716 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14717 if (!CONST_INT_P (op[nopnds]))
14718 error ("argument %d must be an unsigned literal", arg_num);
14719 else
14720 error ("argument %d is an unsigned literal that is "
14721 "out of range", arg_num);
14722 return const0_rtx;
14724 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14727 nopnds++;
14730 /* Handle the builtins for extended mnemonics. These accept
14731 no arguments, but map to builtins that take arguments. */
14732 switch (fcode)
14734 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14735 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14736 op[nopnds++] = GEN_INT (1);
14737 if (flag_checking)
14738 attr |= RS6000_BTC_UNARY;
14739 break;
14740 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14741 op[nopnds++] = GEN_INT (0);
14742 if (flag_checking)
14743 attr |= RS6000_BTC_UNARY;
14744 break;
14745 default:
14746 break;
14749 /* If this builtin accesses SPRs, then pass in the appropriate
14750 SPR number and SPR regno as the last two operands. */
14751 if (uses_spr)
14753 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14754 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14755 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14757 /* If this builtin accesses a CR, then pass in a scratch
14758 CR as the last operand. */
14759 else if (attr & RS6000_BTC_CR)
14760 cr = gen_reg_rtx (CCmode);
14761 op[nopnds++] = cr;
14764 if (flag_checking)
14766 int expected_nopnds = 0;
14767 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14768 expected_nopnds = 1;
14769 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14770 expected_nopnds = 2;
14771 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14772 expected_nopnds = 3;
14773 if (!(attr & RS6000_BTC_VOID))
14774 expected_nopnds += 1;
14775 if (uses_spr)
14776 expected_nopnds += 2;
14778 gcc_assert (nopnds == expected_nopnds
14779 && nopnds <= MAX_HTM_OPERANDS);
14782 switch (nopnds)
14784 case 1:
14785 pat = GEN_FCN (icode) (op[0]);
14786 break;
14787 case 2:
14788 pat = GEN_FCN (icode) (op[0], op[1]);
14789 break;
14790 case 3:
14791 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14792 break;
14793 case 4:
14794 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14795 break;
14796 default:
14797 gcc_unreachable ();
14799 if (!pat)
14800 return NULL_RTX;
14801 emit_insn (pat);
14803 if (attr & RS6000_BTC_CR)
14805 if (fcode == HTM_BUILTIN_TBEGIN)
14807 /* Emit code to set TARGET to true or false depending on
14808 whether the tbegin. instruction succeeded or failed
14809 to start a transaction.  We do this by placing the 1's
14810 complement of CR's EQ bit into TARGET.  */
14811 rtx scratch = gen_reg_rtx (SImode);
14812 emit_insn (gen_rtx_SET (scratch,
14813 gen_rtx_EQ (SImode, cr,
14814 const0_rtx)));
14815 emit_insn (gen_rtx_SET (target,
14816 gen_rtx_XOR (SImode, scratch,
14817 GEN_INT (1))));
14819 else
14821 /* Emit code to copy the 4-bit condition register field
14822 CR into the least significant end of register TARGET. */
14823 rtx scratch1 = gen_reg_rtx (SImode);
14824 rtx scratch2 = gen_reg_rtx (SImode);
14825 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14826 emit_insn (gen_movcc (subreg, cr));
14827 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14828 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14832 if (nonvoid)
14833 return target;
14834 return const0_rtx;
14837 *expandedp = false;
14838 return NULL_RTX;
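/* Usage sketch (illustrative, not from the original sources): the CR
   handling above is what lets user code test transaction success
   directly, e.g.

     if (__builtin_tbegin (0))
       {
	 ... transactional code ...
	 __builtin_tend (0);
       }

   tbegin. sets CR0, and the expander copies the complement of its EQ
   bit into TARGET, so the builtin returns nonzero on success.  */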
14841 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14843 static rtx
14844 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14845 rtx target)
14847 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14848 if (fcode == RS6000_BUILTIN_CPU_INIT)
14849 return const0_rtx;
14851 if (target == 0 || GET_MODE (target) != SImode)
14852 target = gen_reg_rtx (SImode);
14854 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14855 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14856 /* The target_clones pass creates an ARRAY_REF instead of a STRING_CST;
14857 convert it back to a STRING_CST.  */
14858 if (TREE_CODE (arg) == ARRAY_REF
14859 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14860 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14861 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14862 arg = TREE_OPERAND (arg, 0);
14864 if (TREE_CODE (arg) != STRING_CST)
14866 error ("builtin %qs only accepts a string argument",
14867 rs6000_builtin_info[(size_t) fcode].name);
14868 return const0_rtx;
14871 if (fcode == RS6000_BUILTIN_CPU_IS)
14873 const char *cpu = TREE_STRING_POINTER (arg);
14874 rtx cpuid = NULL_RTX;
14875 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14876 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14878 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14879 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14880 break;
14882 if (cpuid == NULL_RTX)
14884 /* Invalid CPU argument. */
14885 error ("cpu %qs is an invalid argument to builtin %qs",
14886 cpu, rs6000_builtin_info[(size_t) fcode].name);
14887 return const0_rtx;
14890 rtx platform = gen_reg_rtx (SImode);
14891 rtx tcbmem = gen_const_mem (SImode,
14892 gen_rtx_PLUS (Pmode,
14893 gen_rtx_REG (Pmode, TLS_REGNUM),
14894 GEN_INT (TCB_PLATFORM_OFFSET)));
14895 emit_move_insn (platform, tcbmem);
14896 emit_insn (gen_eqsi3 (target, platform, cpuid));
14898 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14900 const char *hwcap = TREE_STRING_POINTER (arg);
14901 rtx mask = NULL_RTX;
14902 int hwcap_offset;
14903 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14904 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14906 mask = GEN_INT (cpu_supports_info[i].mask);
14907 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14908 break;
14910 if (mask == NULL_RTX)
14912 /* Invalid HWCAP argument. */
14913 error ("%s %qs is an invalid argument to builtin %qs",
14914 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14915 return const0_rtx;
14918 rtx tcb_hwcap = gen_reg_rtx (SImode);
14919 rtx tcbmem = gen_const_mem (SImode,
14920 gen_rtx_PLUS (Pmode,
14921 gen_rtx_REG (Pmode, TLS_REGNUM),
14922 GEN_INT (hwcap_offset)));
14923 emit_move_insn (tcb_hwcap, tcbmem);
14924 rtx scratch1 = gen_reg_rtx (SImode);
14925 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14926 rtx scratch2 = gen_reg_rtx (SImode);
14927 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14928 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14930 else
14931 gcc_unreachable ();
14933 /* Record that we have expanded a CPU builtin, so that we can later
14934 emit a reference to the special symbol exported by LIBC to ensure we
14935 do not link against an old LIBC that doesn't support this feature. */
14936 cpu_builtin_p = true;
14938 #else
14939 warning (0, "builtin %qs needs GLIBC (2.23 or newer) that exports hardware "
14940 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14942 /* For old LIBCs, always return FALSE. */
14943 emit_move_insn (target, GEN_INT (0));
14944 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14946 return target;
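/* Usage sketch (illustrative): with a new-enough GLIBC these builtins
   compile to a load from the thread pointer plus a compare or mask,
   with no function call, e.g.

     if (__builtin_cpu_is ("power9"))
       use_power9_path ();
     if (__builtin_cpu_supports ("vsx"))
       use_vsx_path ();

   use_power9_path and use_vsx_path are hypothetical user functions.  */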
14949 static rtx
14950 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14952 rtx pat;
14953 tree arg0 = CALL_EXPR_ARG (exp, 0);
14954 tree arg1 = CALL_EXPR_ARG (exp, 1);
14955 tree arg2 = CALL_EXPR_ARG (exp, 2);
14956 rtx op0 = expand_normal (arg0);
14957 rtx op1 = expand_normal (arg1);
14958 rtx op2 = expand_normal (arg2);
14959 machine_mode tmode = insn_data[icode].operand[0].mode;
14960 machine_mode mode0 = insn_data[icode].operand[1].mode;
14961 machine_mode mode1 = insn_data[icode].operand[2].mode;
14962 machine_mode mode2 = insn_data[icode].operand[3].mode;
14964 if (icode == CODE_FOR_nothing)
14965 /* Builtin not supported on this processor. */
14966 return 0;
14968 /* If we got invalid arguments bail out before generating bad rtl. */
14969 if (arg0 == error_mark_node
14970 || arg1 == error_mark_node
14971 || arg2 == error_mark_node)
14972 return const0_rtx;
14974 /* Check and prepare the arguments depending on the instruction code.
14976 Note that a switch statement instead of the sequence of tests
14977 would be incorrect as many of the CODE_FOR values could be
14978 CODE_FOR_nothing and that would yield multiple alternatives
14979 with identical values. We'd never reach here at runtime in
14980 this case. */
14981 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14982 || icode == CODE_FOR_altivec_vsldoi_v2df
14983 || icode == CODE_FOR_altivec_vsldoi_v4si
14984 || icode == CODE_FOR_altivec_vsldoi_v8hi
14985 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14987 /* Only allow 4-bit unsigned literals. */
14988 STRIP_NOPS (arg2);
14989 if (TREE_CODE (arg2) != INTEGER_CST
14990 || TREE_INT_CST_LOW (arg2) & ~0xf)
14992 error ("argument 3 must be a 4-bit unsigned literal");
14993 return CONST0_RTX (tmode);
14996 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14997 || icode == CODE_FOR_vsx_xxpermdi_v2di
14998 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14999 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
15000 || icode == CODE_FOR_vsx_xxpermdi_v1ti
15001 || icode == CODE_FOR_vsx_xxpermdi_v4sf
15002 || icode == CODE_FOR_vsx_xxpermdi_v4si
15003 || icode == CODE_FOR_vsx_xxpermdi_v8hi
15004 || icode == CODE_FOR_vsx_xxpermdi_v16qi
15005 || icode == CODE_FOR_vsx_xxsldwi_v16qi
15006 || icode == CODE_FOR_vsx_xxsldwi_v8hi
15007 || icode == CODE_FOR_vsx_xxsldwi_v4si
15008 || icode == CODE_FOR_vsx_xxsldwi_v4sf
15009 || icode == CODE_FOR_vsx_xxsldwi_v2di
15010 || icode == CODE_FOR_vsx_xxsldwi_v2df)
15012 /* Only allow 2-bit unsigned literals. */
15013 STRIP_NOPS (arg2);
15014 if (TREE_CODE (arg2) != INTEGER_CST
15015 || TREE_INT_CST_LOW (arg2) & ~0x3)
15017 error ("argument 3 must be a 2-bit unsigned literal");
15018 return CONST0_RTX (tmode);
15021 else if (icode == CODE_FOR_vsx_set_v2df
15022 || icode == CODE_FOR_vsx_set_v2di
15023 || icode == CODE_FOR_bcdadd
15024 || icode == CODE_FOR_bcdadd_lt
15025 || icode == CODE_FOR_bcdadd_eq
15026 || icode == CODE_FOR_bcdadd_gt
15027 || icode == CODE_FOR_bcdsub
15028 || icode == CODE_FOR_bcdsub_lt
15029 || icode == CODE_FOR_bcdsub_eq
15030 || icode == CODE_FOR_bcdsub_gt)
15032 /* Only allow 1-bit unsigned literals. */
15033 STRIP_NOPS (arg2);
15034 if (TREE_CODE (arg2) != INTEGER_CST
15035 || TREE_INT_CST_LOW (arg2) & ~0x1)
15037 error ("argument 3 must be a 1-bit unsigned literal");
15038 return CONST0_RTX (tmode);
15041 else if (icode == CODE_FOR_dfp_ddedpd_dd
15042 || icode == CODE_FOR_dfp_ddedpd_td)
15044 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15045 STRIP_NOPS (arg0);
15046 if (TREE_CODE (arg0) != INTEGER_CST
15047 || TREE_INT_CST_LOW (arg0) & ~0x3)
15049 error ("argument 1 must be 0 or 2");
15050 return CONST0_RTX (tmode);
15053 else if (icode == CODE_FOR_dfp_denbcd_dd
15054 || icode == CODE_FOR_dfp_denbcd_td)
15056 /* Only allow 1-bit unsigned literals. */
15057 STRIP_NOPS (arg0);
15058 if (TREE_CODE (arg0) != INTEGER_CST
15059 || TREE_INT_CST_LOW (arg0) & ~0x1)
15061 error ("argument 1 must be a 1-bit unsigned literal");
15062 return CONST0_RTX (tmode);
15065 else if (icode == CODE_FOR_dfp_dscli_dd
15066 || icode == CODE_FOR_dfp_dscli_td
15067 || icode == CODE_FOR_dfp_dscri_dd
15068 || icode == CODE_FOR_dfp_dscri_td)
15070 /* Only allow 6-bit unsigned literals. */
15071 STRIP_NOPS (arg1);
15072 if (TREE_CODE (arg1) != INTEGER_CST
15073 || TREE_INT_CST_LOW (arg1) & ~0x3f)
15075 error ("argument 2 must be a 6-bit unsigned literal");
15076 return CONST0_RTX (tmode);
15079 else if (icode == CODE_FOR_crypto_vshasigmaw
15080 || icode == CODE_FOR_crypto_vshasigmad)
15082 /* Check whether the 2nd and 3rd arguments are integer constants in
15083 range, and prepare the arguments.  */
15084 STRIP_NOPS (arg1);
15085 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
15087 error ("argument 2 must be 0 or 1");
15088 return CONST0_RTX (tmode);
15091 STRIP_NOPS (arg2);
15092 if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
15094 error ("argument 3 must be in the range 0..15");
15095 return CONST0_RTX (tmode);
15099 if (target == 0
15100 || GET_MODE (target) != tmode
15101 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15102 target = gen_reg_rtx (tmode);
15104 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15105 op0 = copy_to_mode_reg (mode0, op0);
15106 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15107 op1 = copy_to_mode_reg (mode1, op1);
15108 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15109 op2 = copy_to_mode_reg (mode2, op2);
15111 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
15112 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
15113 else
15114 pat = GEN_FCN (icode) (target, op0, op1, op2);
15115 if (! pat)
15116 return 0;
15117 emit_insn (pat);
15119 return target;
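/* Illustrative example: the literal checks above are what reject calls
   whose selector is not a compile-time constant of the right width,
   e.g.

     vec_sld (a, b, 3)   -- accepted, 3 fits in 4 bits
     vec_sld (a, b, n)   -- rejected: "argument 3 must be a 4-bit
			    unsigned literal"

   The errors are issued at expansion time, before any bad rtl can be
   generated.  */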
15122 /* Expand the lvx builtins. */
15123 static rtx
15124 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
15126 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15127 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15128 tree arg0;
15129 machine_mode tmode, mode0;
15130 rtx pat, op0;
15131 enum insn_code icode;
15133 switch (fcode)
15135 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
15136 icode = CODE_FOR_vector_altivec_load_v16qi;
15137 break;
15138 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
15139 icode = CODE_FOR_vector_altivec_load_v8hi;
15140 break;
15141 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
15142 icode = CODE_FOR_vector_altivec_load_v4si;
15143 break;
15144 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
15145 icode = CODE_FOR_vector_altivec_load_v4sf;
15146 break;
15147 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
15148 icode = CODE_FOR_vector_altivec_load_v2df;
15149 break;
15150 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
15151 icode = CODE_FOR_vector_altivec_load_v2di;
15152 break;
15153 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
15154 icode = CODE_FOR_vector_altivec_load_v1ti;
15155 break;
15156 default:
15157 *expandedp = false;
15158 return NULL_RTX;
15161 *expandedp = true;
15163 arg0 = CALL_EXPR_ARG (exp, 0);
15164 op0 = expand_normal (arg0);
15165 tmode = insn_data[icode].operand[0].mode;
15166 mode0 = insn_data[icode].operand[1].mode;
15168 if (target == 0
15169 || GET_MODE (target) != tmode
15170 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15171 target = gen_reg_rtx (tmode);
15173 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15174 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15176 pat = GEN_FCN (icode) (target, op0);
15177 if (! pat)
15178 return 0;
15179 emit_insn (pat);
15180 return target;
15183 /* Expand the stvx builtins. */
15184 static rtx
15185 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15186 bool *expandedp)
15188 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15189 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15190 tree arg0, arg1;
15191 machine_mode mode0, mode1;
15192 rtx pat, op0, op1;
15193 enum insn_code icode;
15195 switch (fcode)
15197 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
15198 icode = CODE_FOR_vector_altivec_store_v16qi;
15199 break;
15200 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
15201 icode = CODE_FOR_vector_altivec_store_v8hi;
15202 break;
15203 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
15204 icode = CODE_FOR_vector_altivec_store_v4si;
15205 break;
15206 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
15207 icode = CODE_FOR_vector_altivec_store_v4sf;
15208 break;
15209 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
15210 icode = CODE_FOR_vector_altivec_store_v2df;
15211 break;
15212 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
15213 icode = CODE_FOR_vector_altivec_store_v2di;
15214 break;
15215 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
15216 icode = CODE_FOR_vector_altivec_store_v1ti;
15217 break;
15218 default:
15219 *expandedp = false;
15220 return NULL_RTX;
15223 arg0 = CALL_EXPR_ARG (exp, 0);
15224 arg1 = CALL_EXPR_ARG (exp, 1);
15225 op0 = expand_normal (arg0);
15226 op1 = expand_normal (arg1);
15227 mode0 = insn_data[icode].operand[0].mode;
15228 mode1 = insn_data[icode].operand[1].mode;
15230 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15231 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15232 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15233 op1 = copy_to_mode_reg (mode1, op1);
15235 pat = GEN_FCN (icode) (op0, op1);
15236 if (pat)
15237 emit_insn (pat);
15239 *expandedp = true;
15240 return NULL_RTX;
15243 /* Expand the dst builtins. */
15244 static rtx
15245 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15246 bool *expandedp)
15248 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15249 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15250 tree arg0, arg1, arg2;
15251 machine_mode mode0, mode1;
15252 rtx pat, op0, op1, op2;
15253 const struct builtin_description *d;
15254 size_t i;
15256 *expandedp = false;
15258 /* Handle DST variants. */
15259 d = bdesc_dst;
15260 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
15261 if (d->code == fcode)
15263 arg0 = CALL_EXPR_ARG (exp, 0);
15264 arg1 = CALL_EXPR_ARG (exp, 1);
15265 arg2 = CALL_EXPR_ARG (exp, 2);
15266 op0 = expand_normal (arg0);
15267 op1 = expand_normal (arg1);
15268 op2 = expand_normal (arg2);
15269 mode0 = insn_data[d->icode].operand[0].mode;
15270 mode1 = insn_data[d->icode].operand[1].mode;
15272 /* Invalid arguments, bail out before generating bad rtl. */
15273 if (arg0 == error_mark_node
15274 || arg1 == error_mark_node
15275 || arg2 == error_mark_node)
15276 return const0_rtx;
15278 *expandedp = true;
15279 STRIP_NOPS (arg2);
15280 if (TREE_CODE (arg2) != INTEGER_CST
15281 || TREE_INT_CST_LOW (arg2) & ~0x3)
15283 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
15284 return const0_rtx;
15287 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15288 op0 = copy_to_mode_reg (Pmode, op0);
15289 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15290 op1 = copy_to_mode_reg (mode1, op1);
15292 pat = GEN_FCN (d->icode) (op0, op1, op2);
15293 if (pat != 0)
15294 emit_insn (pat);
15296 return NULL_RTX;
15299 return NULL_RTX;
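/* Illustrative example: the 2-bit-literal check above is what rejects

     vec_dst (p, ctl, 5);

   since the last argument selects one of only four data-stream tag
   registers (0..3).  */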
15302 /* Expand vec_init builtin. */
15303 static rtx
15304 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
15306 machine_mode tmode = TYPE_MODE (type);
15307 machine_mode inner_mode = GET_MODE_INNER (tmode);
15308 int i, n_elt = GET_MODE_NUNITS (tmode);
15310 gcc_assert (VECTOR_MODE_P (tmode));
15311 gcc_assert (n_elt == call_expr_nargs (exp));
15313 if (!target || !register_operand (target, tmode))
15314 target = gen_reg_rtx (tmode);
15316 /* If we have a vector comprised of a single element, such as V1TImode, do
15317 the initialization directly. */
15318 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
15320 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
15321 emit_move_insn (target, gen_lowpart (tmode, x));
15323 else
15325 rtvec v = rtvec_alloc (n_elt);
15327 for (i = 0; i < n_elt; ++i)
15329 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
15330 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15333 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
15336 return target;
15339 /* Return the integer constant in ARG. Constrain it to be in the range
15340 of the subparts of VEC_TYPE; issue an error if not. */
15342 static int
15343 get_element_number (tree vec_type, tree arg)
15345 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15347 if (!tree_fits_uhwi_p (arg)
15348 || (elt = tree_to_uhwi (arg), elt > max))
15350 error ("selector must be an integer constant in the range 0..%wi", max);
15351 return 0;
15354 return elt;
15357 /* Expand vec_set builtin. */
15358 static rtx
15359 altivec_expand_vec_set_builtin (tree exp)
15361 machine_mode tmode, mode1;
15362 tree arg0, arg1, arg2;
15363 int elt;
15364 rtx op0, op1;
15366 arg0 = CALL_EXPR_ARG (exp, 0);
15367 arg1 = CALL_EXPR_ARG (exp, 1);
15368 arg2 = CALL_EXPR_ARG (exp, 2);
15370 tmode = TYPE_MODE (TREE_TYPE (arg0));
15371 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15372 gcc_assert (VECTOR_MODE_P (tmode));
15374 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
15375 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
15376 elt = get_element_number (TREE_TYPE (arg0), arg2);
15378 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15379 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15381 op0 = force_reg (tmode, op0);
15382 op1 = force_reg (mode1, op1);
15384 rs6000_expand_vector_set (op0, op1, elt);
15386 return op0;
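/* Illustrative example: a call such as vec_insert (x, v, 2) reaches
   this expander; get_element_number verifies that 2 is a valid lane
   for V's type, and rs6000_expand_vector_set rewrites that lane of the
   copy held in OP0.  */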
15389 /* Expand vec_ext builtin. */
15390 static rtx
15391 altivec_expand_vec_ext_builtin (tree exp, rtx target)
15393 machine_mode tmode, mode0;
15394 tree arg0, arg1;
15395 rtx op0;
15396 rtx op1;
15398 arg0 = CALL_EXPR_ARG (exp, 0);
15399 arg1 = CALL_EXPR_ARG (exp, 1);
15401 op0 = expand_normal (arg0);
15402 op1 = expand_normal (arg1);
15404 /* Call get_element_number to validate arg1 if it is a constant. */
15405 if (TREE_CODE (arg1) == INTEGER_CST)
15406 (void) get_element_number (TREE_TYPE (arg0), arg1);
15408 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15409 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15410 gcc_assert (VECTOR_MODE_P (mode0));
15412 op0 = force_reg (mode0, op0);
15414 if (optimize || !target || !register_operand (target, tmode))
15415 target = gen_reg_rtx (tmode);
15417 rs6000_expand_vector_extract (target, op0, op1);
15419 return target;
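/* Illustrative example: vec_extract (v, i) comes through here.  When I
   is a constant it is range-checked by get_element_number; a variable
   I is handled by rs6000_expand_vector_extract at expansion time.  */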
15422 /* Expand the builtin in EXP and store the result in TARGET. Store
15423 true in *EXPANDEDP if we found a builtin to expand. */
15424 static rtx
15425 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
15427 const struct builtin_description *d;
15428 size_t i;
15429 enum insn_code icode;
15430 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15431 tree arg0, arg1, arg2;
15432 rtx op0, pat;
15433 machine_mode tmode, mode0;
15434 enum rs6000_builtins fcode
15435 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15437 if (rs6000_overloaded_builtin_p (fcode))
15439 *expandedp = true;
15440 error ("unresolved overload for Altivec builtin %qF", fndecl);
15442 /* Given it is invalid, just generate a normal call. */
15443 return expand_call (exp, target, false);
15446 target = altivec_expand_ld_builtin (exp, target, expandedp);
15447 if (*expandedp)
15448 return target;
15450 target = altivec_expand_st_builtin (exp, target, expandedp);
15451 if (*expandedp)
15452 return target;
15454 target = altivec_expand_dst_builtin (exp, target, expandedp);
15455 if (*expandedp)
15456 return target;
15458 *expandedp = true;
15460 switch (fcode)
15462 case ALTIVEC_BUILTIN_STVX_V2DF:
15463 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
15464 case ALTIVEC_BUILTIN_STVX_V2DI:
15465 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
15466 case ALTIVEC_BUILTIN_STVX_V4SF:
15467 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
15468 case ALTIVEC_BUILTIN_STVX:
15469 case ALTIVEC_BUILTIN_STVX_V4SI:
15470 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
15471 case ALTIVEC_BUILTIN_STVX_V8HI:
15472 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
15473 case ALTIVEC_BUILTIN_STVX_V16QI:
15474 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
15475 case ALTIVEC_BUILTIN_STVEBX:
15476 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
15477 case ALTIVEC_BUILTIN_STVEHX:
15478 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
15479 case ALTIVEC_BUILTIN_STVEWX:
15480 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
15481 case ALTIVEC_BUILTIN_STVXL_V2DF:
15482 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
15483 case ALTIVEC_BUILTIN_STVXL_V2DI:
15484 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
15485 case ALTIVEC_BUILTIN_STVXL_V4SF:
15486 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
15487 case ALTIVEC_BUILTIN_STVXL:
15488 case ALTIVEC_BUILTIN_STVXL_V4SI:
15489 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
15490 case ALTIVEC_BUILTIN_STVXL_V8HI:
15491 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
15492 case ALTIVEC_BUILTIN_STVXL_V16QI:
15493 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
15495 case ALTIVEC_BUILTIN_STVLX:
15496 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
15497 case ALTIVEC_BUILTIN_STVLXL:
15498 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
15499 case ALTIVEC_BUILTIN_STVRX:
15500 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
15501 case ALTIVEC_BUILTIN_STVRXL:
15502 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
15504 case P9V_BUILTIN_STXVL:
15505 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
15507 case VSX_BUILTIN_STXVD2X_V1TI:
15508 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
15509 case VSX_BUILTIN_STXVD2X_V2DF:
15510 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
15511 case VSX_BUILTIN_STXVD2X_V2DI:
15512 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
15513 case VSX_BUILTIN_STXVW4X_V4SF:
15514 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
15515 case VSX_BUILTIN_STXVW4X_V4SI:
15516 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
15517 case VSX_BUILTIN_STXVW4X_V8HI:
15518 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
15519 case VSX_BUILTIN_STXVW4X_V16QI:
15520 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
15522 /* For the following on big endian, it's ok to use any appropriate
15523 unaligned-supporting store, so use a generic expander. For
15524 little-endian, the exact element-reversing instruction must
15525 be used. */
15526 case VSX_BUILTIN_ST_ELEMREV_V2DF:
15528 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
15529 : CODE_FOR_vsx_st_elemrev_v2df);
15530 return altivec_expand_stv_builtin (code, exp);
15532 case VSX_BUILTIN_ST_ELEMREV_V2DI:
15534 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
15535 : CODE_FOR_vsx_st_elemrev_v2di);
15536 return altivec_expand_stv_builtin (code, exp);
15538 case VSX_BUILTIN_ST_ELEMREV_V4SF:
15540 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
15541 : CODE_FOR_vsx_st_elemrev_v4sf);
15542 return altivec_expand_stv_builtin (code, exp);
15544 case VSX_BUILTIN_ST_ELEMREV_V4SI:
15546 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
15547 : CODE_FOR_vsx_st_elemrev_v4si);
15548 return altivec_expand_stv_builtin (code, exp);
15550 case VSX_BUILTIN_ST_ELEMREV_V8HI:
15552 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
15553 : CODE_FOR_vsx_st_elemrev_v8hi);
15554 return altivec_expand_stv_builtin (code, exp);
15556 case VSX_BUILTIN_ST_ELEMREV_V16QI:
15558 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
15559 : CODE_FOR_vsx_st_elemrev_v16qi);
15560 return altivec_expand_stv_builtin (code, exp);
15563 case ALTIVEC_BUILTIN_MFVSCR:
15564 icode = CODE_FOR_altivec_mfvscr;
15565 tmode = insn_data[icode].operand[0].mode;
15567 if (target == 0
15568 || GET_MODE (target) != tmode
15569 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15570 target = gen_reg_rtx (tmode);
15572 pat = GEN_FCN (icode) (target);
15573 if (! pat)
15574 return 0;
15575 emit_insn (pat);
15576 return target;
15578 case ALTIVEC_BUILTIN_MTVSCR:
15579 icode = CODE_FOR_altivec_mtvscr;
15580 arg0 = CALL_EXPR_ARG (exp, 0);
15581 op0 = expand_normal (arg0);
15582 mode0 = insn_data[icode].operand[0].mode;
15584 /* If we got invalid arguments bail out before generating bad rtl. */
15585 if (arg0 == error_mark_node)
15586 return const0_rtx;
15588 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15589 op0 = copy_to_mode_reg (mode0, op0);
15591 pat = GEN_FCN (icode) (op0);
15592 if (pat)
15593 emit_insn (pat);
15594 return NULL_RTX;
15596 case ALTIVEC_BUILTIN_DSSALL:
15597 emit_insn (gen_altivec_dssall ());
15598 return NULL_RTX;
15600 case ALTIVEC_BUILTIN_DSS:
15601 icode = CODE_FOR_altivec_dss;
15602 arg0 = CALL_EXPR_ARG (exp, 0);
15603 STRIP_NOPS (arg0);
15604 op0 = expand_normal (arg0);
15605 mode0 = insn_data[icode].operand[0].mode;
15607 /* If we got invalid arguments bail out before generating bad rtl. */
15608 if (arg0 == error_mark_node)
15609 return const0_rtx;
15611 if (TREE_CODE (arg0) != INTEGER_CST
15612 || TREE_INT_CST_LOW (arg0) & ~0x3)
15614 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15615 return const0_rtx;
15618 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15619 op0 = copy_to_mode_reg (mode0, op0);
15621 emit_insn (gen_altivec_dss (op0));
15622 return NULL_RTX;
15624 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
15625 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
15626 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
15627 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
15628 case VSX_BUILTIN_VEC_INIT_V2DF:
15629 case VSX_BUILTIN_VEC_INIT_V2DI:
15630 case VSX_BUILTIN_VEC_INIT_V1TI:
15631 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
15633 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15634 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15635 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15636 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15637 case VSX_BUILTIN_VEC_SET_V2DF:
15638 case VSX_BUILTIN_VEC_SET_V2DI:
15639 case VSX_BUILTIN_VEC_SET_V1TI:
15640 return altivec_expand_vec_set_builtin (exp);
15642 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15643 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15644 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15645 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15646 case VSX_BUILTIN_VEC_EXT_V2DF:
15647 case VSX_BUILTIN_VEC_EXT_V2DI:
15648 case VSX_BUILTIN_VEC_EXT_V1TI:
15649 return altivec_expand_vec_ext_builtin (exp, target);
15651 case P9V_BUILTIN_VEXTRACT4B:
15652 case P9V_BUILTIN_VEC_VEXTRACT4B:
15653 arg1 = CALL_EXPR_ARG (exp, 1);
15654 STRIP_NOPS (arg1);
15656 /* Generate a normal call if it is invalid. */
15657 if (arg1 == error_mark_node)
15658 return expand_call (exp, target, false);
15660 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15662 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15663 return expand_call (exp, target, false);
15665 break;
15667 case P9V_BUILTIN_VINSERT4B:
15668 case P9V_BUILTIN_VINSERT4B_DI:
15669 case P9V_BUILTIN_VEC_VINSERT4B:
15670 arg2 = CALL_EXPR_ARG (exp, 2);
15671 STRIP_NOPS (arg2);
15673 /* Generate a normal call if it is invalid. */
15674 if (arg2 == error_mark_node)
15675 return expand_call (exp, target, false);
15677 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15679 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15680 return expand_call (exp, target, false);
15682 break;
15684 default:
15685 break;
15686 /* Fall through. */
15689 /* Expand abs* operations. */
15690 d = bdesc_abs;
15691 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15692 if (d->code == fcode)
15693 return altivec_expand_abs_builtin (d->icode, exp, target);
15695 /* Expand the AltiVec predicates. */
15696 d = bdesc_altivec_preds;
15697 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15698 if (d->code == fcode)
15699 return altivec_expand_predicate_builtin (d->icode, exp, target);
15701 /* The LV* builtins are funky; they were initialized differently from the
other builtins, so handle them separately here.  */
15702 switch (fcode)
15704 case ALTIVEC_BUILTIN_LVSL:
15705 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15706 exp, target, false);
15707 case ALTIVEC_BUILTIN_LVSR:
15708 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15709 exp, target, false);
15710 case ALTIVEC_BUILTIN_LVEBX:
15711 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15712 exp, target, false);
15713 case ALTIVEC_BUILTIN_LVEHX:
15714 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15715 exp, target, false);
15716 case ALTIVEC_BUILTIN_LVEWX:
15717 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15718 exp, target, false);
15719 case ALTIVEC_BUILTIN_LVXL_V2DF:
15720 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15721 exp, target, false);
15722 case ALTIVEC_BUILTIN_LVXL_V2DI:
15723 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15724 exp, target, false);
15725 case ALTIVEC_BUILTIN_LVXL_V4SF:
15726 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15727 exp, target, false);
15728 case ALTIVEC_BUILTIN_LVXL:
15729 case ALTIVEC_BUILTIN_LVXL_V4SI:
15730 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15731 exp, target, false);
15732 case ALTIVEC_BUILTIN_LVXL_V8HI:
15733 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15734 exp, target, false);
15735 case ALTIVEC_BUILTIN_LVXL_V16QI:
15736 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15737 exp, target, false);
15738 case ALTIVEC_BUILTIN_LVX_V2DF:
15739 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
15740 exp, target, false);
15741 case ALTIVEC_BUILTIN_LVX_V2DI:
15742 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
15743 exp, target, false);
15744 case ALTIVEC_BUILTIN_LVX_V4SF:
15745 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
15746 exp, target, false);
15747 case ALTIVEC_BUILTIN_LVX:
15748 case ALTIVEC_BUILTIN_LVX_V4SI:
15749 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
15750 exp, target, false);
15751 case ALTIVEC_BUILTIN_LVX_V8HI:
15752 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
15753 exp, target, false);
15754 case ALTIVEC_BUILTIN_LVX_V16QI:
15755 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
15756 exp, target, false);
15757 case ALTIVEC_BUILTIN_LVLX:
15758 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15759 exp, target, true);
15760 case ALTIVEC_BUILTIN_LVLXL:
15761 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15762 exp, target, true);
15763 case ALTIVEC_BUILTIN_LVRX:
15764 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15765 exp, target, true);
15766 case ALTIVEC_BUILTIN_LVRXL:
15767 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15768 exp, target, true);
15769 case VSX_BUILTIN_LXVD2X_V1TI:
15770 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15771 exp, target, false);
15772 case VSX_BUILTIN_LXVD2X_V2DF:
15773 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15774 exp, target, false);
15775 case VSX_BUILTIN_LXVD2X_V2DI:
15776 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15777 exp, target, false);
15778 case VSX_BUILTIN_LXVW4X_V4SF:
15779 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15780 exp, target, false);
15781 case VSX_BUILTIN_LXVW4X_V4SI:
15782 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15783 exp, target, false);
15784 case VSX_BUILTIN_LXVW4X_V8HI:
15785 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15786 exp, target, false);
15787 case VSX_BUILTIN_LXVW4X_V16QI:
15788 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15789 exp, target, false);
15790 /* For the following on big endian, it's ok to use any appropriate
15791 unaligned-supporting load, so use a generic expander. For
15792 little-endian, the exact element-reversing instruction must
15793 be used. */
15794 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15796 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15797 : CODE_FOR_vsx_ld_elemrev_v2df);
15798 return altivec_expand_lv_builtin (code, exp, target, false);
15800 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15802 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15803 : CODE_FOR_vsx_ld_elemrev_v2di);
15804 return altivec_expand_lv_builtin (code, exp, target, false);
15806 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15808 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15809 : CODE_FOR_vsx_ld_elemrev_v4sf);
15810 return altivec_expand_lv_builtin (code, exp, target, false);
15812 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15814 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15815 : CODE_FOR_vsx_ld_elemrev_v4si);
15816 return altivec_expand_lv_builtin (code, exp, target, false);
15818 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15820 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15821 : CODE_FOR_vsx_ld_elemrev_v8hi);
15822 return altivec_expand_lv_builtin (code, exp, target, false);
15824 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15826 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15827 : CODE_FOR_vsx_ld_elemrev_v16qi);
15828 return altivec_expand_lv_builtin (code, exp, target, false);
15830 break;
15831 default:
15832 break;
15833 /* Fall through. */
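/* Illustrative note: the vsx_ld_elemrev_<mode> patterns selected above
   for little-endian produce a result that is already in element order,
   so unlike the LVX path no separate vperm needs to be emitted
   here.  */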
15836 /* XL_BE builtins: we initialized them to always load in big-endian order.  */
15837 switch (fcode)
15839 case VSX_BUILTIN_XL_BE_V2DI:
15841 enum insn_code code = CODE_FOR_vsx_load_v2di;
15842 return altivec_expand_xl_be_builtin (code, exp, target, false);
15844 break;
15845 case VSX_BUILTIN_XL_BE_V4SI:
15847 enum insn_code code = CODE_FOR_vsx_load_v4si;
15848 return altivec_expand_xl_be_builtin (code, exp, target, false);
15850 break;
15851 case VSX_BUILTIN_XL_BE_V8HI:
15853 enum insn_code code = CODE_FOR_vsx_load_v8hi;
15854 return altivec_expand_xl_be_builtin (code, exp, target, false);
15856 break;
15857 case VSX_BUILTIN_XL_BE_V16QI:
15859 enum insn_code code = CODE_FOR_vsx_load_v16qi;
15860 return altivec_expand_xl_be_builtin (code, exp, target, false);
15862 break;
15863 case VSX_BUILTIN_XL_BE_V2DF:
15865 enum insn_code code = CODE_FOR_vsx_load_v2df;
15866 return altivec_expand_xl_be_builtin (code, exp, target, false);
15868 break;
15869 case VSX_BUILTIN_XL_BE_V4SF:
15871 enum insn_code code = CODE_FOR_vsx_load_v4sf;
15872 return altivec_expand_xl_be_builtin (code, exp, target, false);
15874 break;
15875 default:
15876 break;
15877 /* Fall through. */
15880 *expandedp = false;
15881 return NULL_RTX;
15884 /* Expand the builtin in EXP and store the result in TARGET. Store
15885 true in *EXPANDEDP if we found a builtin to expand. */
15886 static rtx
15887 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
15889 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15890 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15891 const struct builtin_description *d;
15892 size_t i;
15894 *expandedp = true;
15896 switch (fcode)
15898 case PAIRED_BUILTIN_STX:
15899 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
15900 case PAIRED_BUILTIN_LX:
15901 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
15902 default:
15903 break;
15904 /* Fall through. */
15907 /* Expand the paired predicates. */
15908 d = bdesc_paired_preds;
15909 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
15910 if (d->code == fcode)
15911 return paired_expand_predicate_builtin (d->icode, exp, target);
15913 *expandedp = false;
15914 return NULL_RTX;
15917 static rtx
15918 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15920 rtx pat, scratch, tmp;
15921 tree form = CALL_EXPR_ARG (exp, 0);
15922 tree arg0 = CALL_EXPR_ARG (exp, 1);
15923 tree arg1 = CALL_EXPR_ARG (exp, 2);
15924 rtx op0 = expand_normal (arg0);
15925 rtx op1 = expand_normal (arg1);
15926 machine_mode mode0 = insn_data[icode].operand[1].mode;
15927 machine_mode mode1 = insn_data[icode].operand[2].mode;
15928 int form_int;
15929 enum rtx_code code;
15931 if (TREE_CODE (form) != INTEGER_CST)
15933 error ("argument 1 of %qs must be a constant",
15934 "__builtin_paired_predicate");
15935 return const0_rtx;
15937 else
15938 form_int = TREE_INT_CST_LOW (form);
15940 gcc_assert (mode0 == mode1);
15942 if (arg0 == error_mark_node || arg1 == error_mark_node)
15943 return const0_rtx;
15945 if (target == 0
15946 || GET_MODE (target) != SImode
15947 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
15948 target = gen_reg_rtx (SImode);
15949 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
15950 op0 = copy_to_mode_reg (mode0, op0);
15951 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
15952 op1 = copy_to_mode_reg (mode1, op1);
15954 scratch = gen_reg_rtx (CCFPmode);
15956 pat = GEN_FCN (icode) (scratch, op0, op1);
15957 if (!pat)
15958 return const0_rtx;
15960 emit_insn (pat);
15962 switch (form_int)
15964 /* LT bit. */
15965 case 0:
15966 code = LT;
15967 break;
15968 /* GT bit. */
15969 case 1:
15970 code = GT;
15971 break;
15972 /* EQ bit. */
15973 case 2:
15974 code = EQ;
15975 break;
15976 /* UN bit. */
15977 case 3:
15978 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
15979 return target;
15980 default:
15981 error ("argument 1 of %qs is out of range",
15982 "__builtin_paired_predicate");
15983 return const0_rtx;
15986 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
15987 emit_move_insn (target, tmp);
15988 return target;
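/* Illustrative sketch: FORM values 0..2 select the LT, GT and EQ bits
   of the CCFPmode compare, e.g. form 1 effectively yields

     target = (scratch > 0);

   materialized through the gen_rtx_fmt_ee call above, while form 3
   reads the unordered bit via gen_move_from_CR_ov_bit.  */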
15991 /* Raise an error message for a builtin function that is called without the
15992 appropriate target options being set. */
15994 static void
15995 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15997 size_t uns_fncode = (size_t) fncode;
15998 const char *name = rs6000_builtin_info[uns_fncode].name;
15999 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
16001 gcc_assert (name != NULL);
16002 if ((fnmask & RS6000_BTM_CELL) != 0)
16003 error ("builtin function %qs is only valid for the cell processor", name);
16004 else if ((fnmask & RS6000_BTM_VSX) != 0)
16005 error ("builtin function %qs requires the %qs option", name, "-mvsx");
16006 else if ((fnmask & RS6000_BTM_HTM) != 0)
16007 error ("builtin function %qs requires the %qs option", name, "-mhtm");
16008 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
16009 error ("builtin function %qs requires the %qs option", name, "-maltivec");
16010 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
16011 error ("builtin function %qs requires the %qs option", name, "-mpaired");
16012 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16013 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16014 error ("builtin function %qs requires the %qs and %qs options",
16015 name, "-mhard-dfp", "-mpower8-vector");
16016 else if ((fnmask & RS6000_BTM_DFP) != 0)
16017 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
16018 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
16019 error ("builtin function %qs requires the %qs option", name,
16020 "-mpower8-vector");
16021 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16022 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16023 error ("builtin function %qs requires the %qs and %qs options",
16024 name, "-mcpu=power9", "-m64");
16025 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
16026 error ("builtin function %qs requires the %qs option", name,
16027 "-mcpu=power9");
16028 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16029 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16030 error ("builtin function %qs requires the %qs and %qs options",
16031 name, "-mcpu=power9", "-m64");
16032 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
16033 error ("builtin function %qs requires the %qs option", name,
16034 "-mcpu=power9");
16035 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16036 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16037 error ("builtin function %qs requires the %qs and %qs options",
16038 name, "-mhard-float", "-mlong-double-128");
16039 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
16040 error ("builtin function %qs requires the %qs option", name,
16041 "-mhard-float");
16042 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
16043 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
16044 else
16045 error ("builtin function %qs is not supported with the current options",
16046 name);
16049 /* Target hook for early folding of built-ins, shamelessly stolen
16050 from ia64.c. */
16052 static tree
16053 rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
16054 tree *args, bool ignore ATTRIBUTE_UNUSED)
16056 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
16058 enum rs6000_builtins fn_code
16059 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16060 switch (fn_code)
16062 case RS6000_BUILTIN_NANQ:
16063 case RS6000_BUILTIN_NANSQ:
16065 tree type = TREE_TYPE (TREE_TYPE (fndecl));
16066 const char *str = c_getstr (*args);
16067 int quiet = fn_code == RS6000_BUILTIN_NANQ;
16068 REAL_VALUE_TYPE real;
16070 if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
16071 return build_real (type, real);
16072 return NULL_TREE;
16074 case RS6000_BUILTIN_INFQ:
16075 case RS6000_BUILTIN_HUGE_VALQ:
16077 tree type = TREE_TYPE (TREE_TYPE (fndecl));
16078 REAL_VALUE_TYPE inf;
16079 real_inf (&inf);
16080 return build_real (type, inf);
16082 default:
16083 break;
16086 #ifdef SUBTARGET_FOLD_BUILTIN
16087 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
16088 #else
16089 return NULL_TREE;
16090 #endif
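/* Illustrative example: with this hook, a call such as

     __float128 x = __builtin_nanq ("");

   folds at compile time to a quiet-NaN constant instead of surviving
   as a call.  */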
16093 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
16094 a constant, use rs6000_fold_builtin.) */
16096 bool
16097 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
16099 gimple *stmt = gsi_stmt (*gsi);
16100 tree fndecl = gimple_call_fndecl (stmt);
16101 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
16102 enum rs6000_builtins fn_code
16103 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16104 tree arg0, arg1, lhs;
16106 size_t uns_fncode = (size_t) fn_code;
16107 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
16108 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
16109 const char *fn_name2 = (icode != CODE_FOR_nothing)
16110 ? get_insn_name ((int) icode)
16111 : "nothing";
16113 if (TARGET_DEBUG_BUILTIN)
16114 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
16115 fn_code, fn_name1, fn_name2);
16117 if (!rs6000_fold_gimple)
16118 return false;
16120 /* Prevent gimple folding of calls that have no LHS.  */
16121 if (!gimple_call_lhs (stmt))
16122 return false;
16124 switch (fn_code)
16126 /* Flavors of vec_add. We deliberately don't expand
16127 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
16128 TImode, resulting in much poorer code generation. */
16129 case ALTIVEC_BUILTIN_VADDUBM:
16130 case ALTIVEC_BUILTIN_VADDUHM:
16131 case ALTIVEC_BUILTIN_VADDUWM:
16132 case P8V_BUILTIN_VADDUDM:
16133 case ALTIVEC_BUILTIN_VADDFP:
16134 case VSX_BUILTIN_XVADDDP:
16136 arg0 = gimple_call_arg (stmt, 0);
16137 arg1 = gimple_call_arg (stmt, 1);
16138 lhs = gimple_call_lhs (stmt);
16139 gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
16140 gimple_set_location (g, gimple_location (stmt));
16141 gsi_replace (gsi, g, true);
16142 return true;
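/* Illustrative example (not from the original sources): after the
   folding above, GIMPLE such as

     _5 = __builtin_altivec_vadduwm (a_2, b_3);

   becomes the generic

     _5 = a_2 + b_3;

   which later passes optimize like any other vector addition.  */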
16144 /* Flavors of vec_sub. We deliberately don't expand
16145 P8V_BUILTIN_VSUBUQM. */
16146 case ALTIVEC_BUILTIN_VSUBUBM:
16147 case ALTIVEC_BUILTIN_VSUBUHM:
16148 case ALTIVEC_BUILTIN_VSUBUWM:
16149 case P8V_BUILTIN_VSUBUDM:
16150 case ALTIVEC_BUILTIN_VSUBFP:
16151 case VSX_BUILTIN_XVSUBDP:
16153 arg0 = gimple_call_arg (stmt, 0);
16154 arg1 = gimple_call_arg (stmt, 1);
16155 lhs = gimple_call_lhs (stmt);
16156 gimple *g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
16157 gimple_set_location (g, gimple_location (stmt));
16158 gsi_replace (gsi, g, true);
16159 return true;
16161 case VSX_BUILTIN_XVMULSP:
16162 case VSX_BUILTIN_XVMULDP:
16164 arg0 = gimple_call_arg (stmt, 0);
16165 arg1 = gimple_call_arg (stmt, 1);
16166 lhs = gimple_call_lhs (stmt);
16167 gimple *g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
16168 gimple_set_location (g, gimple_location (stmt));
16169 gsi_replace (gsi, g, true);
16170 return true;
16172 /* Even element flavors of vec_mul (signed). */
16173 case ALTIVEC_BUILTIN_VMULESB:
16174 case ALTIVEC_BUILTIN_VMULESH:
16175 /* Even element flavors of vec_mul (unsigned). */
16176 case ALTIVEC_BUILTIN_VMULEUB:
16177 case ALTIVEC_BUILTIN_VMULEUH:
16179 arg0 = gimple_call_arg (stmt, 0);
16180 arg1 = gimple_call_arg (stmt, 1);
16181 lhs = gimple_call_lhs (stmt);
16182 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
16183 gimple_set_location (g, gimple_location (stmt));
16184 gsi_replace (gsi, g, true);
16185 return true;
16187 /* Odd element flavors of vec_mul (signed). */
16188 case ALTIVEC_BUILTIN_VMULOSB:
16189 case ALTIVEC_BUILTIN_VMULOSH:
16190 /* Odd element flavors of vec_mul (unsigned). */
16191 case ALTIVEC_BUILTIN_VMULOUB:
16192 case ALTIVEC_BUILTIN_VMULOUH:
16194 arg0 = gimple_call_arg (stmt, 0);
16195 arg1 = gimple_call_arg (stmt, 1);
16196 lhs = gimple_call_lhs (stmt);
16197 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
16198 gimple_set_location (g, gimple_location (stmt));
16199 gsi_replace (gsi, g, true);
16200 return true;
16202 /* Flavors of vec_div (Integer). */
16203 case VSX_BUILTIN_DIV_V2DI:
16204 case VSX_BUILTIN_UDIV_V2DI:
16206 arg0 = gimple_call_arg (stmt, 0);
16207 arg1 = gimple_call_arg (stmt, 1);
16208 lhs = gimple_call_lhs (stmt);
16209 gimple *g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
16210 gimple_set_location (g, gimple_location (stmt));
16211 gsi_replace (gsi, g, true);
16212 return true;
16214 /* Flavors of vec_div (Float). */
16215 case VSX_BUILTIN_XVDIVSP:
16216 case VSX_BUILTIN_XVDIVDP:
16218 arg0 = gimple_call_arg (stmt, 0);
16219 arg1 = gimple_call_arg (stmt, 1);
16220 lhs = gimple_call_lhs (stmt);
16221 gimple *g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
16222 gimple_set_location (g, gimple_location (stmt));
16223 gsi_replace (gsi, g, true);
16224 return true;
16226 /* Flavors of vec_and. */
16227 case ALTIVEC_BUILTIN_VAND:
16229 arg0 = gimple_call_arg (stmt, 0);
16230 arg1 = gimple_call_arg (stmt, 1);
16231 lhs = gimple_call_lhs (stmt);
16232 gimple *g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
16233 gimple_set_location (g, gimple_location (stmt));
16234 gsi_replace (gsi, g, true);
16235 return true;
16237 /* Flavors of vec_andc. */
16238 case ALTIVEC_BUILTIN_VANDC:
16240 arg0 = gimple_call_arg (stmt, 0);
16241 arg1 = gimple_call_arg (stmt, 1);
16242 lhs = gimple_call_lhs (stmt);
16243 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16244 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16245 gimple_set_location (g, gimple_location (stmt));
16246 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16247 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
16248 gimple_set_location (g, gimple_location (stmt));
16249 gsi_replace (gsi, g, true);
16250 return true;
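/* Illustrative example: vec_andc (a, b) is folded into the two
   statements

     tmp_4 = ~b_3;
     _5 = a_2 & tmp_4;

   with TMP created by create_tmp_reg_or_ssa_name above.  */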
16252 /* Flavors of vec_nand. */
16253 case P8V_BUILTIN_VEC_NAND:
16254 case P8V_BUILTIN_NAND_V16QI:
16255 case P8V_BUILTIN_NAND_V8HI:
16256 case P8V_BUILTIN_NAND_V4SI:
16257 case P8V_BUILTIN_NAND_V4SF:
16258 case P8V_BUILTIN_NAND_V2DF:
16259 case P8V_BUILTIN_NAND_V2DI:
16261 arg0 = gimple_call_arg (stmt, 0);
16262 arg1 = gimple_call_arg (stmt, 1);
16263 lhs = gimple_call_lhs (stmt);
16264 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16265 gimple *g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
16266 gimple_set_location (g, gimple_location (stmt));
16267 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16268 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16269 gimple_set_location (g, gimple_location (stmt));
16270 gsi_replace (gsi, g, true);
16271 return true;
16273 /* Flavors of vec_or. */
16274 case ALTIVEC_BUILTIN_VOR:
16276 arg0 = gimple_call_arg (stmt, 0);
16277 arg1 = gimple_call_arg (stmt, 1);
16278 lhs = gimple_call_lhs (stmt);
16279 gimple *g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
16280 gimple_set_location (g, gimple_location (stmt));
16281 gsi_replace (gsi, g, true);
16282 return true;
16284 /* Flavors of vec_orc. */
16285 case P8V_BUILTIN_ORC_V16QI:
16286 case P8V_BUILTIN_ORC_V8HI:
16287 case P8V_BUILTIN_ORC_V4SI:
16288 case P8V_BUILTIN_ORC_V4SF:
16289 case P8V_BUILTIN_ORC_V2DF:
16290 case P8V_BUILTIN_ORC_V2DI:
16292 arg0 = gimple_call_arg (stmt, 0);
16293 arg1 = gimple_call_arg (stmt, 1);
16294 lhs = gimple_call_lhs (stmt);
16295 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16296 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16297 gimple_set_location (g, gimple_location (stmt));
16298 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16299 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
16300 gimple_set_location (g, gimple_location (stmt));
16301 gsi_replace (gsi, g, true);
16302 return true;
16304 /* Flavors of vec_xor. */
16305 case ALTIVEC_BUILTIN_VXOR:
16307 arg0 = gimple_call_arg (stmt, 0);
16308 arg1 = gimple_call_arg (stmt, 1);
16309 lhs = gimple_call_lhs (stmt);
16310 gimple *g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
16311 gimple_set_location (g, gimple_location (stmt));
16312 gsi_replace (gsi, g, true);
16313 return true;
16315 /* Flavors of vec_nor. */
16316 case ALTIVEC_BUILTIN_VNOR:
16318 arg0 = gimple_call_arg (stmt, 0);
16319 arg1 = gimple_call_arg (stmt, 1);
16320 lhs = gimple_call_lhs (stmt);
16321 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16322 gimple *g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
16323 gimple_set_location (g, gimple_location (stmt));
16324 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16325 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16326 gimple_set_location (g, gimple_location (stmt));
16327 gsi_replace (gsi, g, true);
16328 return true;
16330 /* Flavors of vec_abs. */
16331 case ALTIVEC_BUILTIN_ABS_V16QI:
16332 case ALTIVEC_BUILTIN_ABS_V8HI:
16333 case ALTIVEC_BUILTIN_ABS_V4SI:
16334 case ALTIVEC_BUILTIN_ABS_V4SF:
16335 case P8V_BUILTIN_ABS_V2DI:
16336 case VSX_BUILTIN_XVABSDP:
16338 arg0 = gimple_call_arg (stmt, 0);
16339 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16340 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16341 return false;
16342 lhs = gimple_call_lhs (stmt);
16343 gimple *g = gimple_build_assign (lhs, ABS_EXPR, arg0);
16344 gimple_set_location (g, gimple_location (stmt));
16345 gsi_replace (gsi, g, true);
16346 return true;
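/* For illustration: the guard above declines to fold signed integer
   vec_abs unless overflow wraps, because ABS_EXPR of the most negative
   lane value (e.g. INT_MIN in a V4SI element) would invoke undefined
   signed overflow that the hardware builtin does not have.  With
   -fwrapv, TYPE_OVERFLOW_WRAPS is true and the fold proceeds.  */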
16348 /* Flavors of vec_min. */
16349 case VSX_BUILTIN_XVMINDP:
16350 case P8V_BUILTIN_VMINSD:
16351 case P8V_BUILTIN_VMINUD:
16352 case ALTIVEC_BUILTIN_VMINSB:
16353 case ALTIVEC_BUILTIN_VMINSH:
16354 case ALTIVEC_BUILTIN_VMINSW:
16355 case ALTIVEC_BUILTIN_VMINUB:
16356 case ALTIVEC_BUILTIN_VMINUH:
16357 case ALTIVEC_BUILTIN_VMINUW:
16358 case ALTIVEC_BUILTIN_VMINFP:
16360 arg0 = gimple_call_arg (stmt, 0);
16361 arg1 = gimple_call_arg (stmt, 1);
16362 lhs = gimple_call_lhs (stmt);
16363 gimple *g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
16364 gimple_set_location (g, gimple_location (stmt));
16365 gsi_replace (gsi, g, true);
16366 return true;
16368 /* Flavors of vec_max. */
16369 case VSX_BUILTIN_XVMAXDP:
16370 case P8V_BUILTIN_VMAXSD:
16371 case P8V_BUILTIN_VMAXUD:
16372 case ALTIVEC_BUILTIN_VMAXSB:
16373 case ALTIVEC_BUILTIN_VMAXSH:
16374 case ALTIVEC_BUILTIN_VMAXSW:
16375 case ALTIVEC_BUILTIN_VMAXUB:
16376 case ALTIVEC_BUILTIN_VMAXUH:
16377 case ALTIVEC_BUILTIN_VMAXUW:
16378 case ALTIVEC_BUILTIN_VMAXFP:
16380 arg0 = gimple_call_arg (stmt, 0);
16381 arg1 = gimple_call_arg (stmt, 1);
16382 lhs = gimple_call_lhs (stmt);
16383 gimple *g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
16384 gimple_set_location (g, gimple_location (stmt));
16385 gsi_replace (gsi, g, true);
16386 return true;
16388 /* Flavors of vec_eqv. */
16389 case P8V_BUILTIN_EQV_V16QI:
16390 case P8V_BUILTIN_EQV_V8HI:
16391 case P8V_BUILTIN_EQV_V4SI:
16392 case P8V_BUILTIN_EQV_V4SF:
16393 case P8V_BUILTIN_EQV_V2DF:
16394 case P8V_BUILTIN_EQV_V2DI:
16396 arg0 = gimple_call_arg (stmt, 0);
16397 arg1 = gimple_call_arg (stmt, 1);
16398 lhs = gimple_call_lhs (stmt);
16399 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16400 gimple *g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
16401 gimple_set_location (g, gimple_location (stmt));
16402 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16403 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16404 gimple_set_location (g, gimple_location (stmt));
16405 gsi_replace (gsi, g, true);
16406 return true;
16408 /* Flavors of vec_rotate_left. */
16409 case ALTIVEC_BUILTIN_VRLB:
16410 case ALTIVEC_BUILTIN_VRLH:
16411 case ALTIVEC_BUILTIN_VRLW:
16412 case P8V_BUILTIN_VRLD:
16414 arg0 = gimple_call_arg (stmt, 0);
16415 arg1 = gimple_call_arg (stmt, 1);
16416 lhs = gimple_call_lhs (stmt);
16417 gimple *g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
16418 gimple_set_location (g, gimple_location (stmt));
16419 gsi_replace (gsi, g, true);
16420 return true;
16422 /* Flavors of vector shift right algebraic.
16423 vec_sra{b,h,w} -> vsra{b,h,w}. */
16424 case ALTIVEC_BUILTIN_VSRAB:
16425 case ALTIVEC_BUILTIN_VSRAH:
16426 case ALTIVEC_BUILTIN_VSRAW:
16427 case P8V_BUILTIN_VSRAD:
16429 arg0 = gimple_call_arg (stmt, 0);
16430 arg1 = gimple_call_arg (stmt, 1);
16431 lhs = gimple_call_lhs (stmt);
16432 gimple *g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
16433 gimple_set_location (g, gimple_location (stmt));
16434 gsi_replace (gsi, g, true);
16435 return true;
16437 /* Flavors of vector shift left.
16438 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
16439 case ALTIVEC_BUILTIN_VSLB:
16440 case ALTIVEC_BUILTIN_VSLH:
16441 case ALTIVEC_BUILTIN_VSLW:
16442 case P8V_BUILTIN_VSLD:
16444 arg0 = gimple_call_arg (stmt, 0);
16445 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16446 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16447 return false;
16448 arg1 = gimple_call_arg (stmt, 1);
16449 lhs = gimple_call_lhs (stmt);
16450 gimple *g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
16451 gimple_set_location (g, gimple_location (stmt));
16452 gsi_replace (gsi, g, true);
16453 return true;
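/* For illustration: as with vec_abs above, signed left shifts are only
   folded to LSHIFT_EXPR when overflow wraps, since shifting a set bit
   into or past the sign bit is undefined for ordinary signed types but
   well defined for the vsl instructions.  */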
16455 /* Flavors of vector shift right. */
16456 case ALTIVEC_BUILTIN_VSRB:
16457 case ALTIVEC_BUILTIN_VSRH:
16458 case ALTIVEC_BUILTIN_VSRW:
16459 case P8V_BUILTIN_VSRD:
16461 arg0 = gimple_call_arg (stmt, 0);
16462 arg1 = gimple_call_arg (stmt, 1);
16463 lhs = gimple_call_lhs (stmt);
16464 gimple_seq stmts = NULL;
16465 /* Convert arg0 to unsigned. */
16466 tree arg0_unsigned
16467 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
16468 unsigned_type_for (TREE_TYPE (arg0)), arg0);
16469 tree res
16470 = gimple_build (&stmts, RSHIFT_EXPR,
16471 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
16472 /* Convert result back to the lhs type. */
16473 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
16474 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16475 update_call_from_tree (gsi, res);
16476 return true;
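/* For illustration: RSHIFT_EXPR on a signed vector would expand as an
   arithmetic shift, but vsr{b,h,w,d} are logical shifts.  For a signed
   arg0 the sequence built above is roughly

     u   = VIEW_CONVERT_EXPR <unsigned-vector-type> (arg0);
     u   = u >> arg1;
     lhs = VIEW_CONVERT_EXPR <TREE_TYPE (lhs)> (u);

   so zero bits are shifted in regardless of the sign of the input
   lanes.  */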
16478 default:
16479 if (TARGET_DEBUG_BUILTIN)
16480 fprintf (stderr, "gimple builtin intrinsic not matched: %d %s %s\n",
16481 fn_code, fn_name1, fn_name2);
16482 break;
16485 return false;
16488 /* Expand an expression EXP that calls a built-in function,
16489 with result going to TARGET if that's convenient
16490 (and in mode MODE if that's convenient).
16491 SUBTARGET may be used as the target for computing one of EXP's operands.
16492 IGNORE is nonzero if the value is to be ignored. */
16494 static rtx
16495 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16496 machine_mode mode ATTRIBUTE_UNUSED,
16497 int ignore ATTRIBUTE_UNUSED)
16499 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16500 enum rs6000_builtins fcode
16501 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16502 size_t uns_fcode = (size_t)fcode;
16503 const struct builtin_description *d;
16504 size_t i;
16505 rtx ret;
16506 bool success;
16507 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16508 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16510 if (TARGET_DEBUG_BUILTIN)
16512 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16513 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16514 const char *name2 = (icode != CODE_FOR_nothing)
16515 ? get_insn_name ((int) icode)
16516 : "nothing";
16517 const char *name3;
16519 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16521 default: name3 = "unknown"; break;
16522 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16523 case RS6000_BTC_UNARY: name3 = "unary"; break;
16524 case RS6000_BTC_BINARY: name3 = "binary"; break;
16525 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16526 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16527 case RS6000_BTC_ABS: name3 = "abs"; break;
16528 case RS6000_BTC_DST: name3 = "dst"; break;
16532 fprintf (stderr,
16533 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16534 (name1) ? name1 : "---", fcode,
16535 (name2) ? name2 : "---", (int) icode,
16536 name3,
16537 func_valid_p ? "" : ", not valid");
16540 if (!func_valid_p)
16542 rs6000_invalid_builtin (fcode);
16544 /* Given it is invalid, just generate a normal call. */
16545 return expand_call (exp, target, ignore);
16548 switch (fcode)
16550 case RS6000_BUILTIN_RECIP:
16551 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16553 case RS6000_BUILTIN_RECIPF:
16554 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16556 case RS6000_BUILTIN_RSQRTF:
16557 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16559 case RS6000_BUILTIN_RSQRT:
16560 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16562 case POWER7_BUILTIN_BPERMD:
16563 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16564 ? CODE_FOR_bpermd_di
16565 : CODE_FOR_bpermd_si), exp, target);
16567 case RS6000_BUILTIN_GET_TB:
16568 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16569 target);
16571 case RS6000_BUILTIN_MFTB:
16572 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16573 ? CODE_FOR_rs6000_mftb_di
16574 : CODE_FOR_rs6000_mftb_si),
16575 target);
16577 case RS6000_BUILTIN_MFFS:
16578 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16580 case RS6000_BUILTIN_MTFSF:
16581 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16583 case RS6000_BUILTIN_CPU_INIT:
16584 case RS6000_BUILTIN_CPU_IS:
16585 case RS6000_BUILTIN_CPU_SUPPORTS:
16586 return cpu_expand_builtin (fcode, exp, target);
16588 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16589 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16591 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16592 : (int) CODE_FOR_altivec_lvsl_direct);
16593 machine_mode tmode = insn_data[icode].operand[0].mode;
16594 machine_mode mode = insn_data[icode].operand[1].mode;
16595 tree arg;
16596 rtx op, addr, pat;
16598 gcc_assert (TARGET_ALTIVEC);
16600 arg = CALL_EXPR_ARG (exp, 0);
16601 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16602 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16603 addr = memory_address (mode, op);
16604 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16605 op = addr;
16606 else
16608 /* For the load case we need to negate the address. */
16609 op = gen_reg_rtx (GET_MODE (addr));
16610 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16612 op = gen_rtx_MEM (mode, op);
16614 if (target == 0
16615 || GET_MODE (target) != tmode
16616 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16617 target = gen_reg_rtx (tmode);
16619 pat = GEN_FCN (icode) (target, op);
16620 if (!pat)
16621 return 0;
16622 emit_insn (pat);
16624 return target;
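/* For illustration: the mask built here feeds the vectorizer's
   realignment scheme.  Roughly, for a misaligned address the permute
   control vector selects which bytes of two adjacent aligned quadwords
   form the requested value, along the lines of

     lo  = aligned load at (addr & -16);
     hi  = aligned load 16 bytes later;
     val = vperm (lo, hi, mask);

   The negation emitted above for the load case produces the address
   from which the permute control is derived on this path.  */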
16627 case ALTIVEC_BUILTIN_VCFUX:
16628 case ALTIVEC_BUILTIN_VCFSX:
16629 case ALTIVEC_BUILTIN_VCTUXS:
16630 case ALTIVEC_BUILTIN_VCTSXS:
16631 /* FIXME: There's got to be a nicer way to handle this case than
16632 constructing a new CALL_EXPR. */
16633 if (call_expr_nargs (exp) == 1)
16635 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16636 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16638 break;
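/* For illustration: the rewrite above makes a one-argument call such as
   __builtin_altivec_vcfsx (v) behave exactly like
   __builtin_altivec_vcfsx (v, 0), i.e. a conversion with a scale factor
   of zero.  */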
16640 default:
16641 break;
16644 if (TARGET_ALTIVEC)
16646 ret = altivec_expand_builtin (exp, target, &success);
16648 if (success)
16649 return ret;
16651 if (TARGET_PAIRED_FLOAT)
16653 ret = paired_expand_builtin (exp, target, &success);
16655 if (success)
16656 return ret;
16658 if (TARGET_HTM)
16660 ret = htm_expand_builtin (exp, target, &success);
16662 if (success)
16663 return ret;
16666 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16667 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16668 gcc_assert (attr == RS6000_BTC_UNARY
16669 || attr == RS6000_BTC_BINARY
16670 || attr == RS6000_BTC_TERNARY
16671 || attr == RS6000_BTC_SPECIAL);
16673 /* Handle simple unary operations. */
16674 d = bdesc_1arg;
16675 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16676 if (d->code == fcode)
16677 return rs6000_expand_unop_builtin (d->icode, exp, target);
16679 /* Handle simple binary operations. */
16680 d = bdesc_2arg;
16681 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16682 if (d->code == fcode)
16683 return rs6000_expand_binop_builtin (d->icode, exp, target);
16685 /* Handle simple ternary operations. */
16686 d = bdesc_3arg;
16687 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16688 if (d->code == fcode)
16689 return rs6000_expand_ternop_builtin (d->icode, exp, target);
16691 /* Handle simple no-argument operations. */
16692 d = bdesc_0arg;
16693 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16694 if (d->code == fcode)
16695 return rs6000_expand_zeroop_builtin (d->icode, target);
16697 gcc_unreachable ();
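/* For illustration: the bdesc_* tables walked above map builtin codes to
   insn codes, so a simple builtin needs no dedicated case.  Conceptually
   an entry in bdesc_2arg looks like

     { RS6000_BTM_ALTIVEC, CODE_FOR_addv4si3,
       "__builtin_altivec_vadduwm", ALTIVEC_BUILTIN_VADDUWM },

   (see rs6000-builtin.def for the real definitions), and
   rs6000_expand_binop_builtin simply emits that insn on the expanded
   operands.  */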
16700 /* Create a builtin vector type with a name, taking care not to give
16701 the canonical type a name. */
16703 static tree
16704 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16706 tree result = build_vector_type (elt_type, num_elts);
16708 /* Copy so we don't give the canonical type a name. */
16709 result = build_variant_type_copy (result);
16711 add_builtin_type (name, result);
16713 return result;
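/* For illustration: a call such as

     tree t = rs6000_vector_type ("__vector float", float_type_node, 4);

   returns a named variant of the canonical V4SF vector type.  Keeping
   the canonical type itself anonymous matters because many differently
   named vector types can share one canonical type.  */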
16716 static void
16717 rs6000_init_builtins (void)
16719 tree tdecl;
16720 tree ftype;
16721 machine_mode mode;
16723 if (TARGET_DEBUG_BUILTIN)
16724 fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
16725 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
16726 (TARGET_ALTIVEC) ? ", altivec" : "",
16727 (TARGET_VSX) ? ", vsx" : "");
16729 V2SI_type_node = build_vector_type (intSI_type_node, 2);
16730 V2SF_type_node = build_vector_type (float_type_node, 2);
16731 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16732 : "__vector long long",
16733 intDI_type_node, 2);
16734 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16735 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16736 intSI_type_node, 4);
16737 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16738 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16739 intHI_type_node, 8);
16740 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16741 intQI_type_node, 16);
16743 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16744 unsigned_intQI_type_node, 16);
16745 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16746 unsigned_intHI_type_node, 8);
16747 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16748 unsigned_intSI_type_node, 4);
16749 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16750 ? "__vector unsigned long"
16751 : "__vector unsigned long long",
16752 unsigned_intDI_type_node, 2);
16754 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
16755 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
16756 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
16757 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16759 const_str_type_node
16760 = build_pointer_type (build_qualified_type (char_type_node,
16761 TYPE_QUAL_CONST));
16763 /* We use V1TI mode as a special container to hold __int128_t items that
16764 must live in VSX registers. */
16765 if (intTI_type_node)
16767 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16768 intTI_type_node, 1);
16769 unsigned_V1TI_type_node
16770 = rs6000_vector_type ("__vector unsigned __int128",
16771 unsigned_intTI_type_node, 1);
16774 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16775 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16776 'vector unsigned short'. */
16778 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16779 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16780 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16781 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16782 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16784 long_integer_type_internal_node = long_integer_type_node;
16785 long_unsigned_type_internal_node = long_unsigned_type_node;
16786 long_long_integer_type_internal_node = long_long_integer_type_node;
16787 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16788 intQI_type_internal_node = intQI_type_node;
16789 uintQI_type_internal_node = unsigned_intQI_type_node;
16790 intHI_type_internal_node = intHI_type_node;
16791 uintHI_type_internal_node = unsigned_intHI_type_node;
16792 intSI_type_internal_node = intSI_type_node;
16793 uintSI_type_internal_node = unsigned_intSI_type_node;
16794 intDI_type_internal_node = intDI_type_node;
16795 uintDI_type_internal_node = unsigned_intDI_type_node;
16796 intTI_type_internal_node = intTI_type_node;
16797 uintTI_type_internal_node = unsigned_intTI_type_node;
16798 float_type_internal_node = float_type_node;
16799 double_type_internal_node = double_type_node;
16800 long_double_type_internal_node = long_double_type_node;
16801 dfloat64_type_internal_node = dfloat64_type_node;
16802 dfloat128_type_internal_node = dfloat128_type_node;
16803 void_type_internal_node = void_type_node;
16805 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16806 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16807 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16808 format that uses a pair of doubles, depending on the switches and
16809 defaults.
16811 We do not enable the actual __float128 keyword unless the user explicitly
16812 asks for it, because the library support is not yet complete.
16814 If we don't support either 128-bit IBM double-double or IEEE 128-bit
16815 floating point, we need to make sure the type is non-zero or else the
16816 self-test fails during bootstrap.
16818 We don't register a built-in type for __ibm128 if the type is the same as
16819 long double. Instead, rs6000_cpu_cpp_builtins adds a #define mapping
16820 __ibm128 to long double. */
16821 if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
16823 ibm128_float_type_node = make_node (REAL_TYPE);
16824 TYPE_PRECISION (ibm128_float_type_node) = 128;
16825 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16826 layout_type (ibm128_float_type_node);
16828 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16829 "__ibm128");
16831 else
16832 ibm128_float_type_node = long_double_type_node;
16834 if (TARGET_FLOAT128_KEYWORD)
16836 ieee128_float_type_node = float128_type_node;
16837 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16838 "__float128");
16841 else if (TARGET_FLOAT128_TYPE)
16843 ieee128_float_type_node = make_node (REAL_TYPE);
16844 TYPE_PRECISION (ieee128_float_type_node) = 128;
16845 SET_TYPE_MODE (ieee128_float_type_node, KFmode);
16846 layout_type (ieee128_float_type_node);
16848 /* If we are not exporting the __float128/_Float128 keywords, we need a
16849 keyword to get the types created. Use __ieee128 as the dummy
16850 keyword. */
16851 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16852 "__ieee128");
16855 else
16856 ieee128_float_type_node = long_double_type_node;
16858 /* Initialize the modes for builtin_function_type, mapping a machine
16859 mode to its tree type node. */
16860 builtin_mode_to_type[QImode][0] = integer_type_node;
16861 builtin_mode_to_type[HImode][0] = integer_type_node;
16862 builtin_mode_to_type[SImode][0] = intSI_type_node;
16863 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16864 builtin_mode_to_type[DImode][0] = intDI_type_node;
16865 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16866 builtin_mode_to_type[TImode][0] = intTI_type_node;
16867 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16868 builtin_mode_to_type[SFmode][0] = float_type_node;
16869 builtin_mode_to_type[DFmode][0] = double_type_node;
16870 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16871 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16872 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16873 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16874 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16875 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16876 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16877 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
16878 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
16879 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16880 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16881 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16882 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16883 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16884 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16885 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16886 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16887 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16888 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
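/* For illustration: builtin_function_type consults this table to turn a
   (machine mode, unsignedness) pair into a tree type.  For example,

     tree t = builtin_mode_to_type[V4SImode][1];

   yields unsigned_V4SI_type_node, i.e. __vector unsigned int; index 0
   is the signed (or only) variant and index 1 the unsigned variant.  */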
16890 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16891 TYPE_NAME (bool_char_type_node) = tdecl;
16893 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16894 TYPE_NAME (bool_short_type_node) = tdecl;
16896 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16897 TYPE_NAME (bool_int_type_node) = tdecl;
16899 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16900 TYPE_NAME (pixel_type_node) = tdecl;
16902 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16903 bool_char_type_node, 16);
16904 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16905 bool_short_type_node, 8);
16906 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16907 bool_int_type_node, 4);
16908 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16909 ? "__vector __bool long"
16910 : "__vector __bool long long",
16911 bool_long_type_node, 2);
16912 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16913 pixel_type_node, 8);
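/* For illustration: the distinct type copies matter for overload
   resolution, e.g. in C++

     vector bool int m;
     vector unsigned int u;

   must select different vec_* overloads even though both are 128-bit
   vectors of 32-bit unsigned elements underneath.  */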
16915 /* Paired builtins are only available if the compiler was built with the
16916 appropriate options, so create them only when those options are enabled.
16917 Create Altivec and VSX builtins on machines with at least the general
16918 purpose extensions (970 and newer) to allow the use of the target
16919 attribute. */
16920 if (TARGET_PAIRED_FLOAT)
16921 paired_init_builtins ();
16922 if (TARGET_EXTRA_BUILTINS)
16923 altivec_init_builtins ();
16924 if (TARGET_HTM)
16925 htm_init_builtins ();
16927 if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
16928 rs6000_common_init_builtins ();
16930 ftype = build_function_type_list (ieee128_float_type_node,
16931 const_str_type_node, NULL_TREE);
16932 def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
16933 def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);
16935 ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
16936 def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
16937 def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);
16939 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16940 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16941 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16943 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16944 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16945 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16947 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16948 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16949 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16951 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16952 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16953 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
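/* For illustration: under suitable flags (e.g. -ffast-math with -mrecip)
   these map to the hardware estimate-and-refine sequences, so user code
   such as

     double q = __builtin_recipdiv (x, y);
     float r = __builtin_rsqrtf (f);

   computes x/y and 1.0f/sqrtf (f) via fre/frsqrte style expansions
   rather than a full-precision divide and square root.  */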
16955 mode = (TARGET_64BIT) ? DImode : SImode;
16956 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16957 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16958 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16960 ftype = build_function_type_list (unsigned_intDI_type_node,
16961 NULL_TREE);
16962 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16964 if (TARGET_64BIT)
16965 ftype = build_function_type_list (unsigned_intDI_type_node,
16966 NULL_TREE);
16967 else
16968 ftype = build_function_type_list (unsigned_intSI_type_node,
16969 NULL_TREE);
16970 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16972 ftype = build_function_type_list (double_type_node, NULL_TREE);
16973 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16975 ftype = build_function_type_list (void_type_node,
16976 intSI_type_node, double_type_node,
16977 NULL_TREE);
16978 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16980 ftype = build_function_type_list (void_type_node, NULL_TREE);
16981 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16983 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16984 NULL_TREE);
16985 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16986 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
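/* For illustration: these expose runtime CPU identification to user
   code, e.g.

     __builtin_cpu_init ();
     if (__builtin_cpu_supports ("vsx"))
       use_vsx_path ();
     else if (__builtin_cpu_is ("power7"))
       use_power7_path ();

   where use_vsx_path and use_power7_path are hypothetical user
   functions; the strings are matched against the AUX vector feature
   bits (see ppc-auxv.h).  */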
16988 /* AIX libm provides clog as __clog. */
16989 if (TARGET_XCOFF &&
16990 (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16991 set_user_assembler_name (tdecl, "__clog");
16993 #ifdef SUBTARGET_INIT_BUILTINS
16994 SUBTARGET_INIT_BUILTINS;
16995 #endif
16998 /* Returns the rs6000 builtin decl for CODE. */
17000 static tree
17001 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
17003 HOST_WIDE_INT fnmask;
17005 if (code >= RS6000_BUILTIN_COUNT)
17006 return error_mark_node;
17008 fnmask = rs6000_builtin_info[code].mask;
17009 if ((fnmask & rs6000_builtin_mask) != fnmask)
17011 rs6000_invalid_builtin ((enum rs6000_builtins)code);
17012 return error_mark_node;
17015 return rs6000_builtin_decls[code];
17018 static void
17019 paired_init_builtins (void)
17021 const struct builtin_description *d;
17022 size_t i;
17023 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17025 tree int_ftype_int_v2sf_v2sf
17026 = build_function_type_list (integer_type_node,
17027 integer_type_node,
17028 V2SF_type_node,
17029 V2SF_type_node,
17030 NULL_TREE);
17031 tree pcfloat_type_node =
17032 build_pointer_type (build_qualified_type
17033 (float_type_node, TYPE_QUAL_CONST));
17035 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
17036 long_integer_type_node,
17037 pcfloat_type_node,
17038 NULL_TREE);
17039 tree void_ftype_v2sf_long_pcfloat =
17040 build_function_type_list (void_type_node,
17041 V2SF_type_node,
17042 long_integer_type_node,
17043 pcfloat_type_node,
17044 NULL_TREE);
17047 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
17048 PAIRED_BUILTIN_LX);
17051 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
17052 PAIRED_BUILTIN_STX);
17054 /* Predicates. */
17055 d = bdesc_paired_preds;
17056 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
17058 tree type;
17059 HOST_WIDE_INT mask = d->mask;
17061 if ((mask & builtin_mask) != mask)
17063 if (TARGET_DEBUG_BUILTIN)
17064 fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
17065 d->name);
17066 continue;
17069 /* Cannot define builtin if the instruction is disabled. */
17070 gcc_assert (d->icode != CODE_FOR_nothing);
17072 if (TARGET_DEBUG_BUILTIN)
17073 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
17074 (int)i, get_insn_name (d->icode), (int)d->icode,
17075 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
17077 switch (insn_data[d->icode].operand[1].mode)
17079 case E_V2SFmode:
17080 type = int_ftype_int_v2sf_v2sf;
17081 break;
17082 default:
17083 gcc_unreachable ();
17086 def_builtin (d->name, type, d->code);
17090 static void
17091 altivec_init_builtins (void)
17093 const struct builtin_description *d;
17094 size_t i;
17095 tree ftype;
17096 tree decl;
17097 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17099 tree pvoid_type_node = build_pointer_type (void_type_node);
17101 tree pcvoid_type_node
17102 = build_pointer_type (build_qualified_type (void_type_node,
17103 TYPE_QUAL_CONST));
17105 tree int_ftype_opaque
17106 = build_function_type_list (integer_type_node,
17107 opaque_V4SI_type_node, NULL_TREE);
17108 tree opaque_ftype_opaque
17109 = build_function_type_list (integer_type_node, NULL_TREE);
17110 tree opaque_ftype_opaque_int
17111 = build_function_type_list (opaque_V4SI_type_node,
17112 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
17113 tree opaque_ftype_opaque_opaque_int
17114 = build_function_type_list (opaque_V4SI_type_node,
17115 opaque_V4SI_type_node, opaque_V4SI_type_node,
17116 integer_type_node, NULL_TREE);
17117 tree opaque_ftype_opaque_opaque_opaque
17118 = build_function_type_list (opaque_V4SI_type_node,
17119 opaque_V4SI_type_node, opaque_V4SI_type_node,
17120 opaque_V4SI_type_node, NULL_TREE);
17121 tree opaque_ftype_opaque_opaque
17122 = build_function_type_list (opaque_V4SI_type_node,
17123 opaque_V4SI_type_node, opaque_V4SI_type_node,
17124 NULL_TREE);
17125 tree int_ftype_int_opaque_opaque
17126 = build_function_type_list (integer_type_node,
17127 integer_type_node, opaque_V4SI_type_node,
17128 opaque_V4SI_type_node, NULL_TREE);
17129 tree int_ftype_int_v4si_v4si
17130 = build_function_type_list (integer_type_node,
17131 integer_type_node, V4SI_type_node,
17132 V4SI_type_node, NULL_TREE);
17133 tree int_ftype_int_v2di_v2di
17134 = build_function_type_list (integer_type_node,
17135 integer_type_node, V2DI_type_node,
17136 V2DI_type_node, NULL_TREE);
17137 tree void_ftype_v4si
17138 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
17139 tree v8hi_ftype_void
17140 = build_function_type_list (V8HI_type_node, NULL_TREE);
17141 tree void_ftype_void
17142 = build_function_type_list (void_type_node, NULL_TREE);
17143 tree void_ftype_int
17144 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
17146 tree opaque_ftype_long_pcvoid
17147 = build_function_type_list (opaque_V4SI_type_node,
17148 long_integer_type_node, pcvoid_type_node,
17149 NULL_TREE);
17150 tree v16qi_ftype_long_pcvoid
17151 = build_function_type_list (V16QI_type_node,
17152 long_integer_type_node, pcvoid_type_node,
17153 NULL_TREE);
17154 tree v8hi_ftype_long_pcvoid
17155 = build_function_type_list (V8HI_type_node,
17156 long_integer_type_node, pcvoid_type_node,
17157 NULL_TREE);
17158 tree v4si_ftype_long_pcvoid
17159 = build_function_type_list (V4SI_type_node,
17160 long_integer_type_node, pcvoid_type_node,
17161 NULL_TREE);
17162 tree v4sf_ftype_long_pcvoid
17163 = build_function_type_list (V4SF_type_node,
17164 long_integer_type_node, pcvoid_type_node,
17165 NULL_TREE);
17166 tree v2df_ftype_long_pcvoid
17167 = build_function_type_list (V2DF_type_node,
17168 long_integer_type_node, pcvoid_type_node,
17169 NULL_TREE);
17170 tree v2di_ftype_long_pcvoid
17171 = build_function_type_list (V2DI_type_node,
17172 long_integer_type_node, pcvoid_type_node,
17173 NULL_TREE);
17175 tree void_ftype_opaque_long_pvoid
17176 = build_function_type_list (void_type_node,
17177 opaque_V4SI_type_node, long_integer_type_node,
17178 pvoid_type_node, NULL_TREE);
17179 tree void_ftype_v4si_long_pvoid
17180 = build_function_type_list (void_type_node,
17181 V4SI_type_node, long_integer_type_node,
17182 pvoid_type_node, NULL_TREE);
17183 tree void_ftype_v16qi_long_pvoid
17184 = build_function_type_list (void_type_node,
17185 V16QI_type_node, long_integer_type_node,
17186 pvoid_type_node, NULL_TREE);
17188 tree void_ftype_v16qi_pvoid_long
17189 = build_function_type_list (void_type_node,
17190 V16QI_type_node, pvoid_type_node,
17191 long_integer_type_node, NULL_TREE);
17193 tree void_ftype_v8hi_long_pvoid
17194 = build_function_type_list (void_type_node,
17195 V8HI_type_node, long_integer_type_node,
17196 pvoid_type_node, NULL_TREE);
17197 tree void_ftype_v4sf_long_pvoid
17198 = build_function_type_list (void_type_node,
17199 V4SF_type_node, long_integer_type_node,
17200 pvoid_type_node, NULL_TREE);
17201 tree void_ftype_v2df_long_pvoid
17202 = build_function_type_list (void_type_node,
17203 V2DF_type_node, long_integer_type_node,
17204 pvoid_type_node, NULL_TREE);
17205 tree void_ftype_v2di_long_pvoid
17206 = build_function_type_list (void_type_node,
17207 V2DI_type_node, long_integer_type_node,
17208 pvoid_type_node, NULL_TREE);
17209 tree int_ftype_int_v8hi_v8hi
17210 = build_function_type_list (integer_type_node,
17211 integer_type_node, V8HI_type_node,
17212 V8HI_type_node, NULL_TREE);
17213 tree int_ftype_int_v16qi_v16qi
17214 = build_function_type_list (integer_type_node,
17215 integer_type_node, V16QI_type_node,
17216 V16QI_type_node, NULL_TREE);
17217 tree int_ftype_int_v4sf_v4sf
17218 = build_function_type_list (integer_type_node,
17219 integer_type_node, V4SF_type_node,
17220 V4SF_type_node, NULL_TREE);
17221 tree int_ftype_int_v2df_v2df
17222 = build_function_type_list (integer_type_node,
17223 integer_type_node, V2DF_type_node,
17224 V2DF_type_node, NULL_TREE);
17225 tree v2di_ftype_v2di
17226 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17227 tree v4si_ftype_v4si
17228 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17229 tree v8hi_ftype_v8hi
17230 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17231 tree v16qi_ftype_v16qi
17232 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17233 tree v4sf_ftype_v4sf
17234 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17235 tree v2df_ftype_v2df
17236 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17237 tree void_ftype_pcvoid_int_int
17238 = build_function_type_list (void_type_node,
17239 pcvoid_type_node, integer_type_node,
17240 integer_type_node, NULL_TREE);
17242 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17243 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17244 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17245 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17246 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17247 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17248 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17249 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17250 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17251 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17252 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17253 ALTIVEC_BUILTIN_LVXL_V2DF);
17254 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17255 ALTIVEC_BUILTIN_LVXL_V2DI);
17256 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17257 ALTIVEC_BUILTIN_LVXL_V4SF);
17258 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17259 ALTIVEC_BUILTIN_LVXL_V4SI);
17260 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17261 ALTIVEC_BUILTIN_LVXL_V8HI);
17262 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17263 ALTIVEC_BUILTIN_LVXL_V16QI);
17264 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17265 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17266 ALTIVEC_BUILTIN_LVX_V2DF);
17267 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17268 ALTIVEC_BUILTIN_LVX_V2DI);
17269 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17270 ALTIVEC_BUILTIN_LVX_V4SF);
17271 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17272 ALTIVEC_BUILTIN_LVX_V4SI);
17273 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17274 ALTIVEC_BUILTIN_LVX_V8HI);
17275 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17276 ALTIVEC_BUILTIN_LVX_V16QI);
17277 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17278 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17279 ALTIVEC_BUILTIN_STVX_V2DF);
17280 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17281 ALTIVEC_BUILTIN_STVX_V2DI);
17282 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17283 ALTIVEC_BUILTIN_STVX_V4SF);
17284 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17285 ALTIVEC_BUILTIN_STVX_V4SI);
17286 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17287 ALTIVEC_BUILTIN_STVX_V8HI);
17288 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17289 ALTIVEC_BUILTIN_STVX_V16QI);
17290 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17291 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17292 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17293 ALTIVEC_BUILTIN_STVXL_V2DF);
17294 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17295 ALTIVEC_BUILTIN_STVXL_V2DI);
17296 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17297 ALTIVEC_BUILTIN_STVXL_V4SF);
17298 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17299 ALTIVEC_BUILTIN_STVXL_V4SI);
17300 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17301 ALTIVEC_BUILTIN_STVXL_V8HI);
17302 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17303 ALTIVEC_BUILTIN_STVXL_V16QI);
17304 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17305 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17306 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17307 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17308 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17309 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17310 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17311 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17312 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17313 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17314 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17315 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17316 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17317 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17318 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17319 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17321 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17322 VSX_BUILTIN_LXVD2X_V2DF);
17323 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17324 VSX_BUILTIN_LXVD2X_V2DI);
17325 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17326 VSX_BUILTIN_LXVW4X_V4SF);
17327 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17328 VSX_BUILTIN_LXVW4X_V4SI);
17329 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17330 VSX_BUILTIN_LXVW4X_V8HI);
17331 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17332 VSX_BUILTIN_LXVW4X_V16QI);
17333 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17334 VSX_BUILTIN_STXVD2X_V2DF);
17335 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17336 VSX_BUILTIN_STXVD2X_V2DI);
17337 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17338 VSX_BUILTIN_STXVW4X_V4SF);
17339 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17340 VSX_BUILTIN_STXVW4X_V4SI);
17341 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17342 VSX_BUILTIN_STXVW4X_V8HI);
17343 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17344 VSX_BUILTIN_STXVW4X_V16QI);
17346 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17347 VSX_BUILTIN_LD_ELEMREV_V2DF);
17348 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17349 VSX_BUILTIN_LD_ELEMREV_V2DI);
17350 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17351 VSX_BUILTIN_LD_ELEMREV_V4SF);
17352 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17353 VSX_BUILTIN_LD_ELEMREV_V4SI);
17354 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17355 VSX_BUILTIN_ST_ELEMREV_V2DF);
17356 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17357 VSX_BUILTIN_ST_ELEMREV_V2DI);
17358 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17359 VSX_BUILTIN_ST_ELEMREV_V4SF);
17360 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17361 VSX_BUILTIN_ST_ELEMREV_V4SI);
17363 def_builtin ("__builtin_vsx_le_be_v8hi", v8hi_ftype_long_pcvoid,
17364 VSX_BUILTIN_XL_BE_V8HI);
17365 def_builtin ("__builtin_vsx_le_be_v4si", v4si_ftype_long_pcvoid,
17366 VSX_BUILTIN_XL_BE_V4SI);
17367 def_builtin ("__builtin_vsx_le_be_v2di", v2di_ftype_long_pcvoid,
17368 VSX_BUILTIN_XL_BE_V2DI);
17369 def_builtin ("__builtin_vsx_le_be_v4sf", v4sf_ftype_long_pcvoid,
17370 VSX_BUILTIN_XL_BE_V4SF);
17371 def_builtin ("__builtin_vsx_le_be_v2df", v2df_ftype_long_pcvoid,
17372 VSX_BUILTIN_XL_BE_V2DF);
17373 def_builtin ("__builtin_vsx_le_be_v16qi", v16qi_ftype_long_pcvoid,
17374 VSX_BUILTIN_XL_BE_V16QI);
17376 if (TARGET_P9_VECTOR)
17378 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17379 VSX_BUILTIN_LD_ELEMREV_V8HI);
17380 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17381 VSX_BUILTIN_LD_ELEMREV_V16QI);
17382 def_builtin ("__builtin_vsx_st_elemrev_v8hi",
17383 void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
17384 def_builtin ("__builtin_vsx_st_elemrev_v16qi",
17385 void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
17387 else
17389 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V8HI]
17390 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V8HI];
17391 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V16QI]
17392 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V16QI];
17393 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V8HI]
17394 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V8HI];
17395 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V16QI]
17396 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V16QI];
17399 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17400 VSX_BUILTIN_VEC_LD);
17401 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17402 VSX_BUILTIN_VEC_ST);
17403 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17404 VSX_BUILTIN_VEC_XL);
17405 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17406 VSX_BUILTIN_VEC_XL_BE);
17407 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17408 VSX_BUILTIN_VEC_XST);
17410 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17411 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17412 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17414 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17415 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17416 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17417 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17418 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17419 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17420 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17421 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17422 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17423 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17424 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17425 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17427 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17428 ALTIVEC_BUILTIN_VEC_ADDE);
17429 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17430 ALTIVEC_BUILTIN_VEC_ADDEC);
17431 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17432 ALTIVEC_BUILTIN_VEC_CMPNE);
17433 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17434 ALTIVEC_BUILTIN_VEC_MUL);
17435 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17436 ALTIVEC_BUILTIN_VEC_SUBE);
17437 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17438 ALTIVEC_BUILTIN_VEC_SUBEC);
17440 /* Cell builtins. */
17441 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17442 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17443 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17444 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17446 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17447 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17448 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17449 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17451 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17452 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17453 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17454 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17456 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17457 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17458 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17459 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17461 if (TARGET_P9_VECTOR)
17462 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17463 P9V_BUILTIN_STXVL);
17465 /* Add the DST variants. */
17466 d = bdesc_dst;
17467 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17469 HOST_WIDE_INT mask = d->mask;
17471 /* It is expected that these dst built-in functions may have
17472 d->icode equal to CODE_FOR_nothing. */
17473 if ((mask & builtin_mask) != mask)
17475 if (TARGET_DEBUG_BUILTIN)
17476 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17477 d->name);
17478 continue;
17480 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17483 /* Initialize the predicates. */
17484 d = bdesc_altivec_preds;
17485 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17487 machine_mode mode1;
17488 tree type;
17489 HOST_WIDE_INT mask = d->mask;
17491 if ((mask & builtin_mask) != mask)
17493 if (TARGET_DEBUG_BUILTIN)
17494 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17495 d->name);
17496 continue;
17499 if (rs6000_overloaded_builtin_p (d->code))
17500 mode1 = VOIDmode;
17501 else
17503 /* Cannot define builtin if the instruction is disabled. */
17504 gcc_assert (d->icode != CODE_FOR_nothing);
17505 mode1 = insn_data[d->icode].operand[1].mode;
17508 switch (mode1)
17510 case E_VOIDmode:
17511 type = int_ftype_int_opaque_opaque;
17512 break;
17513 case E_V2DImode:
17514 type = int_ftype_int_v2di_v2di;
17515 break;
17516 case E_V4SImode:
17517 type = int_ftype_int_v4si_v4si;
17518 break;
17519 case E_V8HImode:
17520 type = int_ftype_int_v8hi_v8hi;
17521 break;
17522 case E_V16QImode:
17523 type = int_ftype_int_v16qi_v16qi;
17524 break;
17525 case E_V4SFmode:
17526 type = int_ftype_int_v4sf_v4sf;
17527 break;
17528 case E_V2DFmode:
17529 type = int_ftype_int_v2df_v2df;
17530 break;
17531 default:
17532 gcc_unreachable ();
17535 def_builtin (d->name, type, d->code);
17538 /* Initialize the abs* operators. */
17539 d = bdesc_abs;
17540 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17542 machine_mode mode0;
17543 tree type;
17544 HOST_WIDE_INT mask = d->mask;
17546 if ((mask & builtin_mask) != mask)
17548 if (TARGET_DEBUG_BUILTIN)
17549 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17550 d->name);
17551 continue;
17554 /* Cannot define builtin if the instruction is disabled. */
17555 gcc_assert (d->icode != CODE_FOR_nothing);
17556 mode0 = insn_data[d->icode].operand[0].mode;
17558 switch (mode0)
17560 case E_V2DImode:
17561 type = v2di_ftype_v2di;
17562 break;
17563 case E_V4SImode:
17564 type = v4si_ftype_v4si;
17565 break;
17566 case E_V8HImode:
17567 type = v8hi_ftype_v8hi;
17568 break;
17569 case E_V16QImode:
17570 type = v16qi_ftype_v16qi;
17571 break;
17572 case E_V4SFmode:
17573 type = v4sf_ftype_v4sf;
17574 break;
17575 case E_V2DFmode:
17576 type = v2df_ftype_v2df;
17577 break;
17578 default:
17579 gcc_unreachable ();
17582 def_builtin (d->name, type, d->code);
17585 /* Initialize target builtin that implements
17586 targetm.vectorize.builtin_mask_for_load. */
17588 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17589 v16qi_ftype_long_pcvoid,
17590 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17591 BUILT_IN_MD, NULL, NULL_TREE);
17592 TREE_READONLY (decl) = 1;
17593 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17594 altivec_builtin_mask_for_load = decl;
17596 /* Access to the vec_init patterns. */
17597 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17598 integer_type_node, integer_type_node,
17599 integer_type_node, NULL_TREE);
17600 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17602 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17603 short_integer_type_node,
17604 short_integer_type_node,
17605 short_integer_type_node,
17606 short_integer_type_node,
17607 short_integer_type_node,
17608 short_integer_type_node,
17609 short_integer_type_node, NULL_TREE);
17610 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17612 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17613 char_type_node, char_type_node,
17614 char_type_node, char_type_node,
17615 char_type_node, char_type_node,
17616 char_type_node, char_type_node,
17617 char_type_node, char_type_node,
17618 char_type_node, char_type_node,
17619 char_type_node, char_type_node,
17620 char_type_node, NULL_TREE);
17621 def_builtin ("__builtin_vec_init_v16qi", ftype,
17622 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17624 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17625 float_type_node, float_type_node,
17626 float_type_node, NULL_TREE);
17627 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17629 /* VSX builtins. */
17630 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17631 double_type_node, NULL_TREE);
17632 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17634 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17635 intDI_type_node, NULL_TREE);
17636 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17638 /* Access to the vec_set patterns. */
17639 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17640 intSI_type_node,
17641 integer_type_node, NULL_TREE);
17642 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17644 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17645 intHI_type_node,
17646 integer_type_node, NULL_TREE);
17647 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17649 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17650 intQI_type_node,
17651 integer_type_node, NULL_TREE);
17652 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17654 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17655 float_type_node,
17656 integer_type_node, NULL_TREE);
17657 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17659 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17660 double_type_node,
17661 integer_type_node, NULL_TREE);
17662 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17664 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17665 intDI_type_node,
17666 integer_type_node, NULL_TREE);
17667 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17669 /* Access to the vec_extract patterns. */
17670 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17671 integer_type_node, NULL_TREE);
17672 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17674 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17675 integer_type_node, NULL_TREE);
17676 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17678 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17679 integer_type_node, NULL_TREE);
17680 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17682 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17683 integer_type_node, NULL_TREE);
17684 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17686 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17687 integer_type_node, NULL_TREE);
17688 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17690 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17691 integer_type_node, NULL_TREE);
17692 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17695 if (V1TI_type_node)
17697 tree v1ti_ftype_long_pcvoid
17698 = build_function_type_list (V1TI_type_node,
17699 long_integer_type_node, pcvoid_type_node,
17700 NULL_TREE);
17701 tree void_ftype_v1ti_long_pvoid
17702 = build_function_type_list (void_type_node,
17703 V1TI_type_node, long_integer_type_node,
17704 pvoid_type_node, NULL_TREE);
17705 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17706 VSX_BUILTIN_LXVD2X_V1TI);
17707 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17708 VSX_BUILTIN_STXVD2X_V1TI);
17709 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17710 NULL_TREE, NULL_TREE);
17711 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17712 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17713 intTI_type_node,
17714 integer_type_node, NULL_TREE);
17715 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17716 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17717 integer_type_node, NULL_TREE);
17718 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17723 static void
17724 htm_init_builtins (void)
17726 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17727 const struct builtin_description *d;
17728 size_t i;
17730 d = bdesc_htm;
17731 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17733 tree op[MAX_HTM_OPERANDS], type;
17734 HOST_WIDE_INT mask = d->mask;
17735 unsigned attr = rs6000_builtin_info[d->code].attr;
17736 bool void_func = (attr & RS6000_BTC_VOID);
17737 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17738 int nopnds = 0;
17739 tree gpr_type_node;
17740 tree rettype;
17741 tree argtype;
17743 /* It is expected that these htm built-in functions may have
17744 d->icode equal to CODE_FOR_nothing. */
17746 if (TARGET_32BIT && TARGET_POWERPC64)
17747 gpr_type_node = long_long_unsigned_type_node;
17748 else
17749 gpr_type_node = long_unsigned_type_node;
17751 if (attr & RS6000_BTC_SPR)
17753 rettype = gpr_type_node;
17754 argtype = gpr_type_node;
17756 else if (d->code == HTM_BUILTIN_TABORTDC
17757 || d->code == HTM_BUILTIN_TABORTDCI)
17759 rettype = unsigned_type_node;
17760 argtype = gpr_type_node;
17762 else
17764 rettype = unsigned_type_node;
17765 argtype = unsigned_type_node;
17768 if ((mask & builtin_mask) != mask)
17770 if (TARGET_DEBUG_BUILTIN)
17771 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
17772 continue;
17775 if (d->name == 0)
17777 if (TARGET_DEBUG_BUILTIN)
17778 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
17779 (long unsigned) i);
17780 continue;
17783 op[nopnds++] = (void_func) ? void_type_node : rettype;
17785 if (attr_args == RS6000_BTC_UNARY)
17786 op[nopnds++] = argtype;
17787 else if (attr_args == RS6000_BTC_BINARY)
17789 op[nopnds++] = argtype;
17790 op[nopnds++] = argtype;
17792 else if (attr_args == RS6000_BTC_TERNARY)
17794 op[nopnds++] = argtype;
17795 op[nopnds++] = argtype;
17796 op[nopnds++] = argtype;
17799 switch (nopnds)
17801 case 1:
17802 type = build_function_type_list (op[0], NULL_TREE);
17803 break;
17804 case 2:
17805 type = build_function_type_list (op[0], op[1], NULL_TREE);
17806 break;
17807 case 3:
17808 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17809 break;
17810 case 4:
17811 type = build_function_type_list (op[0], op[1], op[2], op[3],
17812 NULL_TREE);
17813 break;
17814 default:
17815 gcc_unreachable ();
17818 def_builtin (d->name, type, d->code);
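/* As a sketch of the operand packing above: a non-void RS6000_BTC_BINARY HTM
   builtin collects op[] = { rettype, argtype, argtype }, so nopnds == 3 and
   the registered signature is "rettype f (argtype, argtype)".  */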
17822 /* Hash function for builtin functions with up to 3 arguments and a return
17823 type. */
17824 hashval_t
17825 builtin_hasher::hash (builtin_hash_struct *bh)
17827 unsigned ret = 0;
17828 int i;
17830 for (i = 0; i < 4; i++)
17832 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17833 ret = (ret * 2) + bh->uns_p[i];
17836 return ret;
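/* In other words, the hash is a mixed-radix encoding of the four
   (mode, unsignedness) pairs: each iteration folds in the mode in base
   MAX_MACHINE_MODE and the uns_p flag in base 2, so distinct tuples yield
   distinct hashes (modulo overflow of hashval_t).  */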
17839 /* Compare builtin hash entries H1 and H2 for equivalence. */
17840 bool
17841 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17843 return ((p1->mode[0] == p2->mode[0])
17844 && (p1->mode[1] == p2->mode[1])
17845 && (p1->mode[2] == p2->mode[2])
17846 && (p1->mode[3] == p2->mode[3])
17847 && (p1->uns_p[0] == p2->uns_p[0])
17848 && (p1->uns_p[1] == p2->uns_p[1])
17849 && (p1->uns_p[2] == p2->uns_p[2])
17850 && (p1->uns_p[3] == p2->uns_p[3]));
17853 /* Map types for builtin functions with an explicit return type and up to 3
17854 arguments. Functions with fewer than 3 arguments use VOIDmode as the mode
17855 of the unused arguments. */
17856 static tree
17857 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17858 machine_mode mode_arg1, machine_mode mode_arg2,
17859 enum rs6000_builtins builtin, const char *name)
17861 struct builtin_hash_struct h;
17862 struct builtin_hash_struct *h2;
17863 int num_args = 3;
17864 int i;
17865 tree ret_type = NULL_TREE;
17866 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17868 /* Create builtin_hash_table. */
17869 if (builtin_hash_table == NULL)
17870 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17872 h.type = NULL_TREE;
17873 h.mode[0] = mode_ret;
17874 h.mode[1] = mode_arg0;
17875 h.mode[2] = mode_arg1;
17876 h.mode[3] = mode_arg2;
17877 h.uns_p[0] = 0;
17878 h.uns_p[1] = 0;
17879 h.uns_p[2] = 0;
17880 h.uns_p[3] = 0;
17882 /* If the builtin produces unsigned results or takes unsigned arguments,
17883 and it is returned as a decl for the vectorizer (such as widening
17884 multiplies, permute), make sure the arguments and return value are
17885 type correct. */
17886 switch (builtin)
17888 /* unsigned 1 argument functions. */
17889 case CRYPTO_BUILTIN_VSBOX:
17890 case P8V_BUILTIN_VGBBD:
17891 case MISC_BUILTIN_CDTBCD:
17892 case MISC_BUILTIN_CBCDTD:
17893 h.uns_p[0] = 1;
17894 h.uns_p[1] = 1;
17895 break;
17897 /* unsigned 2 argument functions. */
17898 case ALTIVEC_BUILTIN_VMULEUB:
17899 case ALTIVEC_BUILTIN_VMULEUH:
17900 case ALTIVEC_BUILTIN_VMULEUW:
17901 case ALTIVEC_BUILTIN_VMULOUB:
17902 case ALTIVEC_BUILTIN_VMULOUH:
17903 case ALTIVEC_BUILTIN_VMULOUW:
17904 case CRYPTO_BUILTIN_VCIPHER:
17905 case CRYPTO_BUILTIN_VCIPHERLAST:
17906 case CRYPTO_BUILTIN_VNCIPHER:
17907 case CRYPTO_BUILTIN_VNCIPHERLAST:
17908 case CRYPTO_BUILTIN_VPMSUMB:
17909 case CRYPTO_BUILTIN_VPMSUMH:
17910 case CRYPTO_BUILTIN_VPMSUMW:
17911 case CRYPTO_BUILTIN_VPMSUMD:
17912 case CRYPTO_BUILTIN_VPMSUM:
17913 case MISC_BUILTIN_ADDG6S:
17914 case MISC_BUILTIN_DIVWEU:
17915 case MISC_BUILTIN_DIVWEUO:
17916 case MISC_BUILTIN_DIVDEU:
17917 case MISC_BUILTIN_DIVDEUO:
17918 case VSX_BUILTIN_UDIV_V2DI:
17919 case ALTIVEC_BUILTIN_VMAXUB:
17920 case ALTIVEC_BUILTIN_VMINUB:
17921 case ALTIVEC_BUILTIN_VMAXUH:
17922 case ALTIVEC_BUILTIN_VMINUH:
17923 case ALTIVEC_BUILTIN_VMAXUW:
17924 case ALTIVEC_BUILTIN_VMINUW:
17925 case P8V_BUILTIN_VMAXUD:
17926 case P8V_BUILTIN_VMINUD:
17927 h.uns_p[0] = 1;
17928 h.uns_p[1] = 1;
17929 h.uns_p[2] = 1;
17930 break;
17932 /* unsigned 3 argument functions. */
17933 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17934 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17935 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17936 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17937 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17938 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17939 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17940 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17941 case VSX_BUILTIN_VPERM_16QI_UNS:
17942 case VSX_BUILTIN_VPERM_8HI_UNS:
17943 case VSX_BUILTIN_VPERM_4SI_UNS:
17944 case VSX_BUILTIN_VPERM_2DI_UNS:
17945 case VSX_BUILTIN_XXSEL_16QI_UNS:
17946 case VSX_BUILTIN_XXSEL_8HI_UNS:
17947 case VSX_BUILTIN_XXSEL_4SI_UNS:
17948 case VSX_BUILTIN_XXSEL_2DI_UNS:
17949 case CRYPTO_BUILTIN_VPERMXOR:
17950 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17951 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17952 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17953 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17954 case CRYPTO_BUILTIN_VSHASIGMAW:
17955 case CRYPTO_BUILTIN_VSHASIGMAD:
17956 case CRYPTO_BUILTIN_VSHASIGMA:
17957 h.uns_p[0] = 1;
17958 h.uns_p[1] = 1;
17959 h.uns_p[2] = 1;
17960 h.uns_p[3] = 1;
17961 break;
17963 /* signed permute functions with unsigned char mask. */
17964 case ALTIVEC_BUILTIN_VPERM_16QI:
17965 case ALTIVEC_BUILTIN_VPERM_8HI:
17966 case ALTIVEC_BUILTIN_VPERM_4SI:
17967 case ALTIVEC_BUILTIN_VPERM_4SF:
17968 case ALTIVEC_BUILTIN_VPERM_2DI:
17969 case ALTIVEC_BUILTIN_VPERM_2DF:
17970 case VSX_BUILTIN_VPERM_16QI:
17971 case VSX_BUILTIN_VPERM_8HI:
17972 case VSX_BUILTIN_VPERM_4SI:
17973 case VSX_BUILTIN_VPERM_4SF:
17974 case VSX_BUILTIN_VPERM_2DI:
17975 case VSX_BUILTIN_VPERM_2DF:
17976 h.uns_p[3] = 1;
17977 break;
17979 /* unsigned args, signed return. */
17980 case VSX_BUILTIN_XVCVUXDSP:
17981 case VSX_BUILTIN_XVCVUXDDP_UNS:
17982 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17983 h.uns_p[1] = 1;
17984 break;
17986 /* signed args, unsigned return. */
17987 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17988 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17989 case MISC_BUILTIN_UNPACK_TD:
17990 case MISC_BUILTIN_UNPACK_V1TI:
17991 h.uns_p[0] = 1;
17992 break;
17994 /* unsigned arguments for 128-bit pack instructions. */
17995 case MISC_BUILTIN_PACK_TD:
17996 case MISC_BUILTIN_PACK_V1TI:
17997 h.uns_p[1] = 1;
17998 h.uns_p[2] = 1;
17999 break;
18001 /* unsigned second arguments (vector shift right). */
18002 case ALTIVEC_BUILTIN_VSRB:
18003 case ALTIVEC_BUILTIN_VSRH:
18004 case ALTIVEC_BUILTIN_VSRW:
18005 case P8V_BUILTIN_VSRD:
18006 h.uns_p[2] = 1;
18007 break;
18009 default:
18010 break;
18013 /* Figure out how many args are present. */
18014 while (num_args > 0 && h.mode[num_args] == VOIDmode)
18015 num_args--;
18017 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
18018 if (!ret_type && h.uns_p[0])
18019 ret_type = builtin_mode_to_type[h.mode[0]][0];
18021 if (!ret_type)
18022 fatal_error (input_location,
18023 "internal error: builtin function %qs had an unexpected "
18024 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
18026 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
18027 arg_type[i] = NULL_TREE;
18029 for (i = 0; i < num_args; i++)
18031 int m = (int) h.mode[i+1];
18032 int uns_p = h.uns_p[i+1];
18034 arg_type[i] = builtin_mode_to_type[m][uns_p];
18035 if (!arg_type[i] && uns_p)
18036 arg_type[i] = builtin_mode_to_type[m][0];
18038 if (!arg_type[i])
18039 fatal_error (input_location,
18040 "internal error: builtin function %qs, argument %d "
18041 "had unexpected argument type %qs", name, i,
18042 GET_MODE_NAME (m));
18045 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
18046 if (*found == NULL)
18048 h2 = ggc_alloc<builtin_hash_struct> ();
18049 *h2 = h;
18050 *found = h2;
18052 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
18053 arg_type[2], NULL_TREE);
18056 return (*found)->type;
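/* Worked example (a sketch): builtin_function_type (V4SImode, V4SImode,
   V4SImode, VOIDmode, code, name) computes num_args == 2 and, for a builtin
   not listed in the switch above, returns (and caches) the function type
   "v4si f (v4si, v4si)" with signed element types.  */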
18059 static void
18060 rs6000_common_init_builtins (void)
18062 const struct builtin_description *d;
18063 size_t i;
18065 tree opaque_ftype_opaque = NULL_TREE;
18066 tree opaque_ftype_opaque_opaque = NULL_TREE;
18067 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
18068 tree v2si_ftype = NULL_TREE;
18069 tree v2si_ftype_qi = NULL_TREE;
18070 tree v2si_ftype_v2si_qi = NULL_TREE;
18071 tree v2si_ftype_int_qi = NULL_TREE;
18072 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18074 if (!TARGET_PAIRED_FLOAT)
18076 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
18077 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
18080 /* Paired builtins are only available if you build a compiler with the
18081 appropriate options, so only create those builtins when those options are
18082 enabled. Create Altivec and VSX builtins on machines with at least the
18083 general purpose extensions (970 and newer) to allow the use of the
18084 target attribute. */
18086 if (TARGET_EXTRA_BUILTINS)
18087 builtin_mask |= RS6000_BTM_COMMON;
18089 /* Add the ternary operators. */
18090 d = bdesc_3arg;
18091 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
18093 tree type;
18094 HOST_WIDE_INT mask = d->mask;
18096 if ((mask & builtin_mask) != mask)
18098 if (TARGET_DEBUG_BUILTIN)
18099 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
18100 continue;
18103 if (rs6000_overloaded_builtin_p (d->code))
18105 if (! (type = opaque_ftype_opaque_opaque_opaque))
18106 type = opaque_ftype_opaque_opaque_opaque
18107 = build_function_type_list (opaque_V4SI_type_node,
18108 opaque_V4SI_type_node,
18109 opaque_V4SI_type_node,
18110 opaque_V4SI_type_node,
18111 NULL_TREE);
18113 else
18115 enum insn_code icode = d->icode;
18116 if (d->name == 0)
18118 if (TARGET_DEBUG_BUILTIN)
18119 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
18120 (long unsigned)i);
18122 continue;
18125 if (icode == CODE_FOR_nothing)
18127 if (TARGET_DEBUG_BUILTIN)
18128 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
18129 d->name);
18131 continue;
18134 type = builtin_function_type (insn_data[icode].operand[0].mode,
18135 insn_data[icode].operand[1].mode,
18136 insn_data[icode].operand[2].mode,
18137 insn_data[icode].operand[3].mode,
18138 d->code, d->name);
18141 def_builtin (d->name, type, d->code);
18144 /* Add the binary operators. */
18145 d = bdesc_2arg;
18146 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
18148 machine_mode mode0, mode1, mode2;
18149 tree type;
18150 HOST_WIDE_INT mask = d->mask;
18152 if ((mask & builtin_mask) != mask)
18154 if (TARGET_DEBUG_BUILTIN)
18155 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
18156 continue;
18159 if (rs6000_overloaded_builtin_p (d->code))
18161 if (! (type = opaque_ftype_opaque_opaque))
18162 type = opaque_ftype_opaque_opaque
18163 = build_function_type_list (opaque_V4SI_type_node,
18164 opaque_V4SI_type_node,
18165 opaque_V4SI_type_node,
18166 NULL_TREE);
18168 else
18170 enum insn_code icode = d->icode;
18171 if (d->name == 0)
18173 if (TARGET_DEBUG_BUILTIN)
18174 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
18175 (long unsigned)i);
18177 continue;
18180 if (icode == CODE_FOR_nothing)
18182 if (TARGET_DEBUG_BUILTIN)
18183 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
18184 d->name);
18186 continue;
18189 mode0 = insn_data[icode].operand[0].mode;
18190 mode1 = insn_data[icode].operand[1].mode;
18191 mode2 = insn_data[icode].operand[2].mode;
18193 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
18195 if (! (type = v2si_ftype_v2si_qi))
18196 type = v2si_ftype_v2si_qi
18197 = build_function_type_list (opaque_V2SI_type_node,
18198 opaque_V2SI_type_node,
18199 char_type_node,
18200 NULL_TREE);
18203 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
18204 && mode2 == QImode)
18206 if (! (type = v2si_ftype_int_qi))
18207 type = v2si_ftype_int_qi
18208 = build_function_type_list (opaque_V2SI_type_node,
18209 integer_type_node,
18210 char_type_node,
18211 NULL_TREE);
18214 else
18215 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
18216 d->code, d->name);
18219 def_builtin (d->name, type, d->code);
18222 /* Add the simple unary operators. */
18223 d = bdesc_1arg;
18224 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18226 machine_mode mode0, mode1;
18227 tree type;
18228 HOST_WIDE_INT mask = d->mask;
18230 if ((mask & builtin_mask) != mask)
18232 if (TARGET_DEBUG_BUILTIN)
18233 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18234 continue;
18237 if (rs6000_overloaded_builtin_p (d->code))
18239 if (! (type = opaque_ftype_opaque))
18240 type = opaque_ftype_opaque
18241 = build_function_type_list (opaque_V4SI_type_node,
18242 opaque_V4SI_type_node,
18243 NULL_TREE);
18245 else
18247 enum insn_code icode = d->icode;
18248 if (d->name == 0)
18250 if (TARGET_DEBUG_BUILTIN)
18251 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
18252 (long unsigned)i);
18254 continue;
18257 if (icode == CODE_FOR_nothing)
18259 if (TARGET_DEBUG_BUILTIN)
18260 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18261 d->name);
18263 continue;
18266 mode0 = insn_data[icode].operand[0].mode;
18267 mode1 = insn_data[icode].operand[1].mode;
18269 if (mode0 == V2SImode && mode1 == QImode)
18271 if (! (type = v2si_ftype_qi))
18272 type = v2si_ftype_qi
18273 = build_function_type_list (opaque_V2SI_type_node,
18274 char_type_node,
18275 NULL_TREE);
18278 else
18279 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18280 d->code, d->name);
18283 def_builtin (d->name, type, d->code);
18286 /* Add the simple no-argument operators. */
18287 d = bdesc_0arg;
18288 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18290 machine_mode mode0;
18291 tree type;
18292 HOST_WIDE_INT mask = d->mask;
18294 if ((mask & builtin_mask) != mask)
18296 if (TARGET_DEBUG_BUILTIN)
18297 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18298 continue;
18300 if (rs6000_overloaded_builtin_p (d->code))
18302 if (!opaque_ftype_opaque)
18303 opaque_ftype_opaque
18304 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18305 type = opaque_ftype_opaque;
18307 else
18309 enum insn_code icode = d->icode;
18310 if (d->name == 0)
18312 if (TARGET_DEBUG_BUILTIN)
18313 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18314 (long unsigned) i);
18315 continue;
18317 if (icode == CODE_FOR_nothing)
18319 if (TARGET_DEBUG_BUILTIN)
18320 fprintf (stderr,
18321 "rs6000_builtin, skip no-argument %s (no code)\n",
18322 d->name);
18323 continue;
18325 mode0 = insn_data[icode].operand[0].mode;
18326 if (mode0 == V2SImode)
18328 /* Code for paired single. */
18329 if (! (type = v2si_ftype))
18331 v2si_ftype
18332 = build_function_type_list (opaque_V2SI_type_node,
18333 NULL_TREE);
18334 type = v2si_ftype;
18337 else
18338 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18339 d->code, d->name);
18341 def_builtin (d->name, type, d->code);
18345 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18346 static void
18347 init_float128_ibm (machine_mode mode)
18349 if (!TARGET_XL_COMPAT)
18351 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18352 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18353 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18354 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18356 if (!TARGET_HARD_FLOAT)
18358 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18359 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18360 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18361 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18362 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18363 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18364 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18365 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18367 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18368 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18369 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18370 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18371 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18372 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18373 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18374 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18377 else
18379 set_optab_libfunc (add_optab, mode, "_xlqadd");
18380 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18381 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18382 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18385 /* Add various conversions for IFmode to use the traditional TFmode
18386 names. */
18387 if (mode == IFmode)
18389 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
18390 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
18391 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
18392 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
18393 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
18394 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
18396 if (TARGET_POWERPC64)
18398 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18399 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18400 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18401 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18406 /* Set up IEEE 128-bit floating point routines. Use different names if the
18407 arguments can be passed in a vector register. The historical PowerPC
18408 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18409 continue to use that if we aren't using vector registers to pass IEEE
18410 128-bit floating point. */
18412 static void
18413 init_float128_ieee (machine_mode mode)
18415 if (FLOAT128_VECTOR_P (mode))
18417 set_optab_libfunc (add_optab, mode, "__addkf3");
18418 set_optab_libfunc (sub_optab, mode, "__subkf3");
18419 set_optab_libfunc (neg_optab, mode, "__negkf2");
18420 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18421 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18422 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18423 set_optab_libfunc (abs_optab, mode, "__abstkf2");
18425 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18426 set_optab_libfunc (ne_optab, mode, "__nekf2");
18427 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18428 set_optab_libfunc (ge_optab, mode, "__gekf2");
18429 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18430 set_optab_libfunc (le_optab, mode, "__lekf2");
18431 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18433 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18434 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18435 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18436 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18438 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
18439 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18440 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
18442 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
18443 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18444 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
18446 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
18447 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
18448 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
18449 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
18450 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
18451 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
18453 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18454 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18455 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18456 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18458 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18459 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18460 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18461 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18463 if (TARGET_POWERPC64)
18465 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18466 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18467 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18468 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18472 else
18474 set_optab_libfunc (add_optab, mode, "_q_add");
18475 set_optab_libfunc (sub_optab, mode, "_q_sub");
18476 set_optab_libfunc (neg_optab, mode, "_q_neg");
18477 set_optab_libfunc (smul_optab, mode, "_q_mul");
18478 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18479 if (TARGET_PPC_GPOPT)
18480 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18482 set_optab_libfunc (eq_optab, mode, "_q_feq");
18483 set_optab_libfunc (ne_optab, mode, "_q_fne");
18484 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18485 set_optab_libfunc (ge_optab, mode, "_q_fge");
18486 set_optab_libfunc (lt_optab, mode, "_q_flt");
18487 set_optab_libfunc (le_optab, mode, "_q_fle");
18489 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18490 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18491 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18492 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18493 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18494 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18495 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18496 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18500 static void
18501 rs6000_init_libfuncs (void)
18503 /* __float128 support. */
18504 if (TARGET_FLOAT128_TYPE)
18506 init_float128_ibm (IFmode);
18507 init_float128_ieee (KFmode);
18510 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18511 if (TARGET_LONG_DOUBLE_128)
18513 if (!TARGET_IEEEQUAD)
18514 init_float128_ibm (TFmode);
18516 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18517 else
18518 init_float128_ieee (TFmode);
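/* The net effect, informally: a 128-bit add on the IBM double-double type
   lowers to a call to __gcc_qadd (or _xlqadd under TARGET_XL_COMPAT), while
   the same operation on the IEEE 128-bit type calls __addkf3, or the
   historical _q_add when the value is not passed in a vector register.  */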
18522 /* Emit a potentially record-form instruction, setting DST from SRC.
18523 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18524 signed comparison of DST with zero. If DOT is 1, the generated RTL
18525 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18526 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18527 a separate COMPARE. */
18529 void
18530 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18532 if (dot == 0)
18534 emit_move_insn (dst, src);
18535 return;
18538 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18540 emit_move_insn (dst, src);
18541 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18542 return;
18545 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18546 if (dot == 1)
18548 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18549 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18551 else
18553 rtx set = gen_rtx_SET (dst, src);
18554 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
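/* For reference, the shapes generated above are, informally:
     dot == 1: (parallel [(set ccreg (compare:CC src 0)) (clobber dst)])
     dot == 2: (parallel [(set ccreg (compare:CC src 0)) (set dst src)])
   and the non-CR0 fallback is a plain set followed by a separate compare.  */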
18559 /* A validation routine: say whether CODE, a condition code, and MODE
18560 match. The other alternatives either don't make sense or should
18561 never be generated. */
18563 void
18564 validate_condition_mode (enum rtx_code code, machine_mode mode)
18566 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18567 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18568 && GET_MODE_CLASS (mode) == MODE_CC);
18570 /* These don't make sense. */
18571 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18572 || mode != CCUNSmode);
18574 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18575 || mode == CCUNSmode);
18577 gcc_assert (mode == CCFPmode
18578 || (code != ORDERED && code != UNORDERED
18579 && code != UNEQ && code != LTGT
18580 && code != UNGT && code != UNLT
18581 && code != UNGE && code != UNLE));
18583 /* These should never be generated except for
18584 flag_finite_math_only. */
18585 gcc_assert (mode != CCFPmode
18586 || flag_finite_math_only
18587 || (code != LE && code != GE
18588 && code != UNEQ && code != LTGT
18589 && code != UNGT && code != UNLT));
18591 /* These are invalid; the information is not there. */
18592 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18596 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18597 rldicl, rldicr, or rldic instruction in mode MODE. If so, and if E is
18598 non-null, store there the bit offset (counted from the right) where
18599 the single stretch of 1 bits begins; similarly for B, store the bit
18600 offset where it ends. */
18602 bool
18603 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18605 unsigned HOST_WIDE_INT val = INTVAL (mask);
18606 unsigned HOST_WIDE_INT bit;
18607 int nb, ne;
18608 int n = GET_MODE_PRECISION (mode);
18610 if (mode != DImode && mode != SImode)
18611 return false;
18613 if (INTVAL (mask) >= 0)
18615 bit = val & -val;
18616 ne = exact_log2 (bit);
18617 nb = exact_log2 (val + bit);
18619 else if (val + 1 == 0)
18621 nb = n;
18622 ne = 0;
18624 else if (val & 1)
18626 val = ~val;
18627 bit = val & -val;
18628 nb = exact_log2 (bit);
18629 ne = exact_log2 (val + bit);
18631 else
18633 bit = val & -val;
18634 ne = exact_log2 (bit);
18635 if (val + bit == 0)
18636 nb = n;
18637 else
18638 nb = 0;
18641 nb--;
18643 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18644 return false;
18646 if (b)
18647 *b = nb;
18648 if (e)
18649 *e = ne;
18651 return true;
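/* Example (a sketch): for SImode MASK 0x00ffff00 the single run of ones
   starts at bit 8 and ends at bit 23 (counted from the right), so this
   returns true with *E == 8 and *B == 23.  A value such as 0x00ff00ff has
   two runs and is rejected by the exact_log2 checks.  */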
18654 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18655 or rldicr instruction, to implement an AND with it in mode MODE. */
18657 bool
18658 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18660 int nb, ne;
18662 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18663 return false;
18665 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18666 does not wrap. */
18667 if (mode == DImode)
18668 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18670 /* For SImode, rlwinm can do everything. */
18671 if (mode == SImode)
18672 return (nb < 32 && ne < 32);
18674 return false;
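/* Examples (sketch): in DImode, 0x00000000ffffffff (ne == 0) is an rldicl
   mask and 0xffffffff00000000 (nb == 63) is an rldicr mask, while
   0x00000000ffff0000 lies entirely within the low 32 bits and can be done
   with rlwinm.  */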
18677 /* Return the instruction template for an AND with mask in mode MODE, with
18678 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18680 const char *
18681 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18683 int nb, ne;
18685 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18686 gcc_unreachable ();
18688 if (mode == DImode && ne == 0)
18690 operands[3] = GEN_INT (63 - nb);
18691 if (dot)
18692 return "rldicl. %0,%1,0,%3";
18693 return "rldicl %0,%1,0,%3";
18696 if (mode == DImode && nb == 63)
18698 operands[3] = GEN_INT (63 - ne);
18699 if (dot)
18700 return "rldicr. %0,%1,0,%3";
18701 return "rldicr %0,%1,0,%3";
18704 if (nb < 32 && ne < 32)
18706 operands[3] = GEN_INT (31 - nb);
18707 operands[4] = GEN_INT (31 - ne);
18708 if (dot)
18709 return "rlwinm. %0,%1,0,%3,%4";
18710 return "rlwinm %0,%1,0,%3,%4";
18713 gcc_unreachable ();
18716 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18717 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18718 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18720 bool
18721 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18723 int nb, ne;
18725 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18726 return false;
18728 int n = GET_MODE_PRECISION (mode);
18729 int sh = -1;
18731 if (CONST_INT_P (XEXP (shift, 1)))
18733 sh = INTVAL (XEXP (shift, 1));
18734 if (sh < 0 || sh >= n)
18735 return false;
18738 rtx_code code = GET_CODE (shift);
18740 /* Convert any shift by 0 to a rotate, to simplify below code. */
18741 if (sh == 0)
18742 code = ROTATE;
18744 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18745 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18746 code = ASHIFT;
18747 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18749 code = LSHIFTRT;
18750 sh = n - sh;
18753 /* DImode rotates need rld*. */
18754 if (mode == DImode && code == ROTATE)
18755 return (nb == 63 || ne == 0 || ne == sh);
18757 /* SImode rotates need rlw*. */
18758 if (mode == SImode && code == ROTATE)
18759 return (nb < 32 && ne < 32 && sh < 32);
18761 /* Wrap-around masks are only okay for rotates. */
18762 if (ne > nb)
18763 return false;
18765 /* Variable shifts are only okay for rotates. */
18766 if (sh < 0)
18767 return false;
18769 /* Don't allow ASHIFT if the mask is wrong for that. */
18770 if (code == ASHIFT && ne < sh)
18771 return false;
18773 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18774 if the mask is wrong for that. */
18775 if (nb < 32 && ne < 32 && sh < 32
18776 && !(code == LSHIFTRT && nb >= 32 - sh))
18777 return true;
18779 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18780 if the mask is wrong for that. */
18781 if (code == LSHIFTRT)
18782 sh = 64 - sh;
18783 if (nb == 63 || ne == 0 || ne == sh)
18784 return !(code == LSHIFTRT && nb >= sh);
18786 return false;
18789 /* Return the instruction template for a shift with mask in mode MODE, with
18790 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18792 const char *
18793 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18795 int nb, ne;
18797 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18798 gcc_unreachable ();
18800 if (mode == DImode && ne == 0)
18802 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18803 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18804 operands[3] = GEN_INT (63 - nb);
18805 if (dot)
18806 return "rld%I2cl. %0,%1,%2,%3";
18807 return "rld%I2cl %0,%1,%2,%3";
18810 if (mode == DImode && nb == 63)
18812 operands[3] = GEN_INT (63 - ne);
18813 if (dot)
18814 return "rld%I2cr. %0,%1,%2,%3";
18815 return "rld%I2cr %0,%1,%2,%3";
18818 if (mode == DImode
18819 && GET_CODE (operands[4]) != LSHIFTRT
18820 && CONST_INT_P (operands[2])
18821 && ne == INTVAL (operands[2]))
18823 operands[3] = GEN_INT (63 - nb);
18824 if (dot)
18825 return "rld%I2c. %0,%1,%2,%3";
18826 return "rld%I2c %0,%1,%2,%3";
18829 if (nb < 32 && ne < 32)
18831 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18832 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18833 operands[3] = GEN_INT (31 - nb);
18834 operands[4] = GEN_INT (31 - ne);
18835 /* This insn can also be a 64-bit rotate with mask that really makes
18836 it just a shift right (with mask); the %h below are to adjust for
18837 that situation (shift count is >= 32 in that case). */
18838 if (dot)
18839 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18840 return "rlw%I2nm %0,%1,%h2,%3,%4";
18843 gcc_unreachable ();
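/* Illustrative note (a sketch): the %I2 in the templates above prints an
   "i" when the shift amount is an immediate, so e.g. "rld%I2cl" assembles
   as rldicl for a constant shift amount and as rldcl for a variable one.  */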
18846 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18847 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18848 ASHIFT, or LSHIFTRT) in mode MODE. */
18850 bool
18851 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18853 int nb, ne;
18855 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18856 return false;
18858 int n = GET_MODE_PRECISION (mode);
18860 int sh = INTVAL (XEXP (shift, 1));
18861 if (sh < 0 || sh >= n)
18862 return false;
18864 rtx_code code = GET_CODE (shift);
18866 /* Convert any shift by 0 to a rotate, to simplify below code. */
18867 if (sh == 0)
18868 code = ROTATE;
18870 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18871 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18872 code = ASHIFT;
18873 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18875 code = LSHIFTRT;
18876 sh = n - sh;
18879 /* DImode rotates need rldimi. */
18880 if (mode == DImode && code == ROTATE)
18881 return (ne == sh);
18883 /* SImode rotates need rlwimi. */
18884 if (mode == SImode && code == ROTATE)
18885 return (nb < 32 && ne < 32 && sh < 32);
18887 /* Wrap-around masks are only okay for rotates. */
18888 if (ne > nb)
18889 return false;
18891 /* Don't allow ASHIFT if the mask is wrong for that. */
18892 if (code == ASHIFT && ne < sh)
18893 return false;
18895 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18896 if the mask is wrong for that. */
18897 if (nb < 32 && ne < 32 && sh < 32
18898 && !(code == LSHIFTRT && nb >= 32 - sh))
18899 return true;
18901 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18902 if the mask is wrong for that. */
18903 if (code == LSHIFTRT)
18904 sh = 64 - sh;
18905 if (ne == sh)
18906 return !(code == LSHIFTRT && nb >= sh);
18908 return false;
18911 /* Return the instruction template for an insert with mask in mode MODE, with
18912 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18914 const char *
18915 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18917 int nb, ne;
18919 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18920 gcc_unreachable ();
18922 /* Prefer rldimi because rlwimi is cracked. */
18923 if (TARGET_POWERPC64
18924 && (!dot || mode == DImode)
18925 && GET_CODE (operands[4]) != LSHIFTRT
18926 && ne == INTVAL (operands[2]))
18928 operands[3] = GEN_INT (63 - nb);
18929 if (dot)
18930 return "rldimi. %0,%1,%2,%3";
18931 return "rldimi %0,%1,%2,%3";
18934 if (nb < 32 && ne < 32)
18936 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18937 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18938 operands[3] = GEN_INT (31 - nb);
18939 operands[4] = GEN_INT (31 - ne);
18940 if (dot)
18941 return "rlwimi. %0,%1,%2,%3,%4";
18942 return "rlwimi %0,%1,%2,%3,%4";
18945 gcc_unreachable ();
18948 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18949 using two machine instructions. */
18951 bool
18952 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18954 /* There are two kinds of AND we can handle with two insns:
18955 1) those we can do with two rl* insns;
18956 2) ori[s];xori[s].
18958 We do not handle that last case yet. */
18960 /* If there is just one stretch of ones, we can do it. */
18961 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18962 return true;
18964 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18965 one insn, we can do the whole thing with two. */
18966 unsigned HOST_WIDE_INT val = INTVAL (c);
18967 unsigned HOST_WIDE_INT bit1 = val & -val;
18968 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18969 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18970 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18971 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
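/* Worked example (a sketch, viewing just the low 32 bits of the CONST_INT):
   for C == 0xff00ff00 in SImode, bit1 == 0x100, bit2 == 0x10000 and
   bit3 == 0x1000000, so the candidate mask is 0xff00ff00 + 0x1000000
   - 0x10000 == 0xffffff00 -- the original value with its lowest hole
   filled -- which is a single run of ones and hence a valid AND mask.  */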
18974 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18975 If EXPAND is true, split rotate-and-mask instructions we generate to
18976 their constituent parts as well (this is used during expand); if DOT
18977 is 1, make the last insn a record-form instruction clobbering the
18978 destination GPR and setting the CC reg (from operands[3]); if 2, set
18979 that GPR as well as the CC reg. */
18981 void
18982 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18984 gcc_assert (!(expand && dot));
18986 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18988 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18989 shift right. This generates better code than doing the masks without
18990 shifts, or shifting first right and then left. */
18991 int nb, ne;
18992 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18994 gcc_assert (mode == DImode);
18996 int shift = 63 - nb;
18997 if (expand)
18999 rtx tmp1 = gen_reg_rtx (DImode);
19000 rtx tmp2 = gen_reg_rtx (DImode);
19001 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
19002 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
19003 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
19005 else
19007 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
19008 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
19009 emit_move_insn (operands[0], tmp);
19010 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
19011 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19013 return;
19016 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
19017 that does the rest. */
19018 unsigned HOST_WIDE_INT bit1 = val & -val;
19019 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19020 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19021 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19023 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
19024 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
19026 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
19028 /* Two "no-rotate"-and-mask instructions, for SImode. */
19029 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
19031 gcc_assert (mode == SImode);
19033 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19034 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
19035 emit_move_insn (reg, tmp);
19036 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19037 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19038 return;
19041 gcc_assert (mode == DImode);
19043 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
19044 insns; we have to do the first in SImode, because it wraps. */
19045 if (mask2 <= 0xffffffff
19046 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
19048 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19049 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
19050 GEN_INT (mask1));
19051 rtx reg_low = gen_lowpart (SImode, reg);
19052 emit_move_insn (reg_low, tmp);
19053 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19054 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19055 return;
19058 /* Two rld* insns: rotate, clear the hole in the middle (which now is
19059 at the top end), rotate back and clear the other hole. */
19060 int right = exact_log2 (bit3);
19061 int left = 64 - right;
19063 /* Rotate the mask too. */
19064 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
19066 if (expand)
19068 rtx tmp1 = gen_reg_rtx (DImode);
19069 rtx tmp2 = gen_reg_rtx (DImode);
19070 rtx tmp3 = gen_reg_rtx (DImode);
19071 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
19072 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
19073 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
19074 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
19076 else
19078 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
19079 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
19080 emit_move_insn (operands[0], tmp);
19081 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
19082 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
19083 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19087 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
19088 for lfq and stfq insns iff the registers are hard registers. */
19090 int
19091 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
19093 /* We might have been passed a SUBREG. */
19094 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
19095 return 0;
19097 /* We might have been passed non floating point registers. */
19098 if (!FP_REGNO_P (REGNO (reg1))
19099 || !FP_REGNO_P (REGNO (reg2)))
19100 return 0;
19102 return (REGNO (reg1) == REGNO (reg2) - 1);
19105 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
19106 addr1 and addr2 must be in consecutive memory locations
19107 (addr2 == addr1 + 8). */
19109 int
19110 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
19112 rtx addr1, addr2;
19113 unsigned int reg1, reg2;
19114 int offset1, offset2;
19116 /* The mems cannot be volatile. */
19117 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
19118 return 0;
19120 addr1 = XEXP (mem1, 0);
19121 addr2 = XEXP (mem2, 0);
19123 /* Extract an offset (if used) from the first addr. */
19124 if (GET_CODE (addr1) == PLUS)
19126 /* If not a REG, return zero. */
19127 if (GET_CODE (XEXP (addr1, 0)) != REG)
19128 return 0;
19129 else
19131 reg1 = REGNO (XEXP (addr1, 0));
19132 /* The offset must be constant! */
19133 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
19134 return 0;
19135 offset1 = INTVAL (XEXP (addr1, 1));
19138 else if (GET_CODE (addr1) != REG)
19139 return 0;
19140 else
19142 reg1 = REGNO (addr1);
19143 /* This was a simple (mem (reg)) expression. Offset is 0. */
19144 offset1 = 0;
19147 /* And now for the second addr. */
19148 if (GET_CODE (addr2) == PLUS)
19150 /* If not a REG, return zero. */
19151 if (GET_CODE (XEXP (addr2, 0)) != REG)
19152 return 0;
19153 else
19155 reg2 = REGNO (XEXP (addr2, 0));
19156 /* The offset must be constant. */
19157 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
19158 return 0;
19159 offset2 = INTVAL (XEXP (addr2, 1));
19162 else if (GET_CODE (addr2) != REG)
19163 return 0;
19164 else
19166 reg2 = REGNO (addr2);
19167 /* This was a simple (mem (reg)) expression. Offset is 0. */
19168 offset2 = 0;
19171 /* Both of these must have the same base register. */
19172 if (reg1 != reg2)
19173 return 0;
19175 /* The offset for the second addr must be 8 more than the first addr. */
19176 if (offset2 != offset1 + 8)
19177 return 0;
19179 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
19180 instructions. */
19181 return 1;
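/* Example (a sketch): (mem (plus (reg 3) (const_int 16))) followed by
   (mem (plus (reg 3) (const_int 24))) qualifies: both use the same base
   register and the second offset is exactly 8 more than the first.  */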
19184 /* Return the mode to be used for memory when a secondary memory
19185 location is needed. For SDmode values we need to use DDmode, in
19186 all other cases we can use the same mode. */
19187 machine_mode
19188 rs6000_secondary_memory_needed_mode (machine_mode mode)
19190 if (lra_in_progress && mode == SDmode)
19191 return DDmode;
19192 return mode;
19195 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19196 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19197 only work on the traditional altivec registers, note if an altivec register
19198 was chosen. */
19200 static enum rs6000_reg_type
19201 register_to_reg_type (rtx reg, bool *is_altivec)
19203 HOST_WIDE_INT regno;
19204 enum reg_class rclass;
19206 if (GET_CODE (reg) == SUBREG)
19207 reg = SUBREG_REG (reg);
19209 if (!REG_P (reg))
19210 return NO_REG_TYPE;
19212 regno = REGNO (reg);
19213 if (regno >= FIRST_PSEUDO_REGISTER)
19215 if (!lra_in_progress && !reload_completed)
19216 return PSEUDO_REG_TYPE;
19218 regno = true_regnum (reg);
19219 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
19220 return PSEUDO_REG_TYPE;
19223 gcc_assert (regno >= 0);
19225 if (is_altivec && ALTIVEC_REGNO_P (regno))
19226 *is_altivec = true;
19228 rclass = rs6000_regno_regclass[regno];
19229 return reg_class_to_reg_type[(int)rclass];
19232 /* Helper function to return the cost of adding a TOC entry address. */
19234 static inline int
19235 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19237 int ret;
19239 if (TARGET_CMODEL != CMODEL_SMALL)
19240 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19242 else
19243 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19245 return ret;
19248 /* Helper function for rs6000_secondary_reload to determine whether the memory
19249 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19250 needs reloading. Return negative if the memory is not handled by the memory
19251 helper functions and a different reload method should be tried, 0 if no
19252 additional instructions are needed, and positive to give the extra cost of
19253 accessing the memory. */
19255 static int
19256 rs6000_secondary_reload_memory (rtx addr,
19257 enum reg_class rclass,
19258 machine_mode mode)
19260 int extra_cost = 0;
19261 rtx reg, and_arg, plus_arg0, plus_arg1;
19262 addr_mask_type addr_mask;
19263 const char *type = NULL;
19264 const char *fail_msg = NULL;
19266 if (GPR_REG_CLASS_P (rclass))
19267 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19269 else if (rclass == FLOAT_REGS)
19270 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19272 else if (rclass == ALTIVEC_REGS)
19273 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19275 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19276 else if (rclass == VSX_REGS)
19277 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19278 & ~RELOAD_REG_AND_M16);
19280 /* If the register allocator hasn't made up its mind yet on the register
19281 class to use, settle on sensible defaults. */
19282 else if (rclass == NO_REGS)
19284 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19285 & ~RELOAD_REG_AND_M16);
19287 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19288 addr_mask &= ~(RELOAD_REG_INDEXED
19289 | RELOAD_REG_PRE_INCDEC
19290 | RELOAD_REG_PRE_MODIFY);
19293 else
19294 addr_mask = 0;
19296 /* If the register isn't valid in this register class, just return now. */
19297 if ((addr_mask & RELOAD_REG_VALID) == 0)
19299 if (TARGET_DEBUG_ADDR)
19301 fprintf (stderr,
19302 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19303 "not valid in class\n",
19304 GET_MODE_NAME (mode), reg_class_names[rclass]);
19305 debug_rtx (addr);
19308 return -1;
19311 switch (GET_CODE (addr))
19313 /* Does the register class support auto update forms for this mode? We
19314 don't need a scratch register, since the powerpc only supports
19315 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19316 case PRE_INC:
19317 case PRE_DEC:
19318 reg = XEXP (addr, 0);
19319 if (!base_reg_operand (addr, GET_MODE (reg)))
19321 fail_msg = "no base register #1";
19322 extra_cost = -1;
19325 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19327 extra_cost = 1;
19328 type = "update";
19330 break;
19332 case PRE_MODIFY:
19333 reg = XEXP (addr, 0);
19334 plus_arg1 = XEXP (addr, 1);
19335 if (!base_reg_operand (reg, GET_MODE (reg))
19336 || GET_CODE (plus_arg1) != PLUS
19337 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19339 fail_msg = "bad PRE_MODIFY";
19340 extra_cost = -1;
19343 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19345 extra_cost = 1;
19346 type = "update";
19348 break;
19350 /* Do we need to simulate AND -16 to clear the bottom address bits used
19351 in VMX load/stores? Only allow the AND for vector sizes. */
19352 case AND:
19353 and_arg = XEXP (addr, 0);
19354 if (GET_MODE_SIZE (mode) != 16
19355 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19356 || INTVAL (XEXP (addr, 1)) != -16)
19358 fail_msg = "bad Altivec AND #1";
19359 extra_cost = -1;
19362 if (rclass != ALTIVEC_REGS)
19364 if (legitimate_indirect_address_p (and_arg, false))
19365 extra_cost = 1;
19367 else if (legitimate_indexed_address_p (and_arg, false))
19368 extra_cost = 2;
19370 else
19372 fail_msg = "bad Altivec AND #2";
19373 extra_cost = -1;
19376 type = "and";
19378 break;
19380 /* If this is an indirect address, make sure it is a base register. */
19381 case REG:
19382 case SUBREG:
19383 if (!legitimate_indirect_address_p (addr, false))
19385 extra_cost = 1;
19386 type = "move";
19388 break;
19390 /* If this is an indexed address, make sure the register class can handle
19391 indexed addresses for this mode. */
19392 case PLUS:
19393 plus_arg0 = XEXP (addr, 0);
19394 plus_arg1 = XEXP (addr, 1);
19396 /* (plus (plus (reg) (constant)) (constant)) is generated during
19397 push_reload processing, so handle it now. */
19398 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19400 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19402 extra_cost = 1;
19403 type = "offset";
19407 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19408 push_reload processing, so handle it now. */
19409 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19411 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19413 extra_cost = 1;
19414 type = "indexed #2";
19418 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19420 fail_msg = "no base register #2";
19421 extra_cost = -1;
19424 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19426 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19427 || !legitimate_indexed_address_p (addr, false))
19429 extra_cost = 1;
19430 type = "indexed";
19434 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19435 && CONST_INT_P (plus_arg1))
19437 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19439 extra_cost = 1;
19440 type = "vector d-form offset";
19444 /* Make sure the register class can handle offset addresses. */
19445 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19447 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19449 extra_cost = 1;
19450 type = "offset #2";
19454 else
19456 fail_msg = "bad PLUS";
19457 extra_cost = -1;
19460 break;
19462 case LO_SUM:
19463 /* Quad offsets are restricted and can't handle normal addresses. */
19464 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19466 extra_cost = -1;
19467 type = "vector d-form lo_sum";
19470 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19472 fail_msg = "bad LO_SUM";
19473 extra_cost = -1;
19476 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19478 extra_cost = 1;
19479 type = "lo_sum";
19481 break;
19483 /* Static addresses need to create a TOC entry. */
19484 case CONST:
19485 case SYMBOL_REF:
19486 case LABEL_REF:
19487 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19489 extra_cost = -1;
19490 type = "vector d-form lo_sum #2";
19493 else
19495 type = "address";
19496 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19498 break;
19500 /* TOC references look like offsetable memory. */
19501 case UNSPEC:
19502 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19504 fail_msg = "bad UNSPEC";
19505 extra_cost = -1;
19508 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19510 extra_cost = -1;
19511 type = "vector d-form lo_sum #3";
19514 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19516 extra_cost = 1;
19517 type = "toc reference";
19519 break;
19521 default:
19523 fail_msg = "bad address";
19524 extra_cost = -1;
19528 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19530 if (extra_cost < 0)
19531 fprintf (stderr,
19532 "rs6000_secondary_reload_memory error: mode = %s, "
19533 "class = %s, addr_mask = '%s', %s\n",
19534 GET_MODE_NAME (mode),
19535 reg_class_names[rclass],
19536 rs6000_debug_addr_mask (addr_mask, false),
19537 (fail_msg != NULL) ? fail_msg : "<bad address>");
19539 else
19540 fprintf (stderr,
19541 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19542 "addr_mask = '%s', extra cost = %d, %s\n",
19543 GET_MODE_NAME (mode),
19544 reg_class_names[rclass],
19545 rs6000_debug_addr_mask (addr_mask, false),
19546 extra_cost,
19547 (type) ? type : "<none>");
19549 debug_rtx (addr);
19552 return extra_cost;
19555 /* Helper function for rs6000_secondary_reload to return true if a move to a
19556 different register class is really a simple move. */
19558 static bool
19559 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19560 enum rs6000_reg_type from_type,
19561 machine_mode mode)
19563 int size = GET_MODE_SIZE (mode);
19565 /* Add support for various direct moves available. In this function, we only
19566 look at cases where we don't need any extra registers, and one or more
19567 simple move insns are issued. Small integers are historically not allowed
19568 in FPR/VSX registers. Single-precision binary floating point is not a
19569 simple move because we need to convert to the single-precision memory
19570 layout. The 4-byte SDmode can be moved. TDmode values are disallowed
19571 since they need special direct move handling, which we do not support yet. */
19572 if (TARGET_DIRECT_MOVE
19573 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19574 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19576 if (TARGET_POWERPC64)
19578 /* ISA 2.07: MTVSRD or MFVSRD. */
19579 if (size == 8)
19580 return true;
19582 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19583 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19584 return true;
19587 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19588 if (TARGET_P8_VECTOR)
19590 if (mode == SImode)
19591 return true;
19593 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19594 return true;
19597 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19598 if (mode == SDmode)
19599 return true;
19602 /* Power6+: MFTGPR or MFFGPR. */
19603 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19604 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19605 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19606 return true;
19608 /* Move to/from SPR. */
19609 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19610 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19611 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19612 return true;
19614 return false;
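/* For instance (a sketch): with 64-bit ISA 2.07 direct moves enabled, a
   DImode copy between a GPR and a VSX register is "simple" here -- a single
   mtvsrd or mfvsrd -- so no scratch register or helper pattern is needed.  */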
19617 /* Direct move helper function for rs6000_secondary_reload. Handle all of
19618 the special direct moves that involve allocating an extra register.
19619 Return true if such a special move is available, recording the helper's
19620 insn code and extra cost in SRI when SRI is non-null. */
19622 static bool
19623 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19624 enum rs6000_reg_type from_type,
19625 machine_mode mode,
19626 secondary_reload_info *sri,
19627 bool altivec_p)
19629 bool ret = false;
19630 enum insn_code icode = CODE_FOR_nothing;
19631 int cost = 0;
19632 int size = GET_MODE_SIZE (mode);
19634 if (TARGET_POWERPC64 && size == 16)
19636 /* Handle moving 128-bit values from GPRs to VSX registers on
19637 ISA 2.07 (power8, power9) when running in 64-bit mode using
19638 XXPERMDI to glue the two 64-bit values back together. */
19639 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19641 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19642 icode = reg_addr[mode].reload_vsx_gpr;
19645 /* Handle moving 128-bit values from VSX registers to GPRs on
19646 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19647 bottom 64-bit value. */
19648 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19650 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19651 icode = reg_addr[mode].reload_gpr_vsx;
19655 else if (TARGET_POWERPC64 && mode == SFmode)
19657 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19659 cost = 3; /* xscvdpspn, mfvsrd, and. */
19660 icode = reg_addr[mode].reload_gpr_vsx;
19663 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19665 cost = 2; /* mtvsrwz, xscvspdpn. */
19666 icode = reg_addr[mode].reload_vsx_gpr;
19670 else if (!TARGET_POWERPC64 && size == 8)
19672 /* Handle moving 64-bit values from GPRs to floating point registers on
19673 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19674 32-bit values back together. Altivec register classes must be handled
19675 specially since a different instruction is used, and the secondary
19676 reload support requires a single instruction class in the scratch
19677 register constraint. However, right now TFmode is not allowed in
19678 Altivec registers, so the pattern will never match. */
19679 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19681 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19682 icode = reg_addr[mode].reload_fpr_gpr;
19686 if (icode != CODE_FOR_nothing)
19688 ret = true;
19689 if (sri)
19691 sri->icode = icode;
19692 sri->extra_cost = cost;
19696 return ret;
19699 /* Return whether a move between two register classes can be done either
19700 directly (simple move) or via a pattern that uses a single extra temporary
19701 (using ISA 2.07's direct move in this case). */
19703 static bool
19704 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19705 enum rs6000_reg_type from_type,
19706 machine_mode mode,
19707 secondary_reload_info *sri,
19708 bool altivec_p)
19710 /* Fall back to load/store reloads if either type is not a register. */
19711 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19712 return false;
19714 /* If we haven't allocated registers yet, assume the move can be done for the
19715 standard register types. */
19716 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19717 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19718 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19719 return true;
19721 /* A move within the same set of registers is a simple move for non-specialized
19722 registers. */
19723 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19724 return true;
19726 /* Check whether a simple move can be done directly. */
19727 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19729 if (sri)
19731 sri->icode = CODE_FOR_nothing;
19732 sri->extra_cost = 0;
19734 return true;
19737 /* Now check if we can do it in a few steps. */
19738 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19739 altivec_p);
19742 /* Inform reload about cases where moving X with a mode MODE to a register in
19743 RCLASS requires an extra scratch or immediate register. Return the class
19744 needed for the immediate register.
19746 For VSX and Altivec, we may need a register to convert sp+offset into
19747 reg+sp.
19749 For misaligned 64-bit gpr loads and stores we need a register to
19750 convert an offset address to indirect. */
19752 static reg_class_t
19753 rs6000_secondary_reload (bool in_p,
19754 rtx x,
19755 reg_class_t rclass_i,
19756 machine_mode mode,
19757 secondary_reload_info *sri)
19759 enum reg_class rclass = (enum reg_class) rclass_i;
19760 reg_class_t ret = ALL_REGS;
19761 enum insn_code icode;
19762 bool default_p = false;
19763 bool done_p = false;
19765 /* Allow subreg of memory before/during reload. */
19766 bool memory_p = (MEM_P (x)
19767 || (!reload_completed && GET_CODE (x) == SUBREG
19768 && MEM_P (SUBREG_REG (x))));
19770 sri->icode = CODE_FOR_nothing;
19771 sri->t_icode = CODE_FOR_nothing;
19772 sri->extra_cost = 0;
19773 icode = ((in_p)
19774 ? reg_addr[mode].reload_load
19775 : reg_addr[mode].reload_store);
19777 if (REG_P (x) || register_operand (x, mode))
19779 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19780 bool altivec_p = (rclass == ALTIVEC_REGS);
19781 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19783 if (!in_p)
19784 std::swap (to_type, from_type);
19786 /* Can we do a direct move of some sort? */
19787 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19788 altivec_p))
19790 icode = (enum insn_code)sri->icode;
19791 default_p = false;
19792 done_p = true;
19793 ret = NO_REGS;
19797 /* Make sure 0.0 is not reloaded or forced into memory. */
19798 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19800 ret = NO_REGS;
19801 default_p = false;
19802 done_p = true;
19805 /* If this is a scalar floating point value and we want to load it into the
19806 traditional Altivec registers, move it via a traditional floating
19807 point register, unless we have D-form addressing. Also make sure that
19808 non-zero constants use a FPR. */
19809 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19810 && !mode_supports_vmx_dform (mode)
19811 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19812 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19814 ret = FLOAT_REGS;
19815 default_p = false;
19816 done_p = true;
19819 /* Handle reload of load/stores if we have reload helper functions. */
19820 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19822 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19823 mode);
19825 if (extra_cost >= 0)
19827 done_p = true;
19828 ret = NO_REGS;
19829 if (extra_cost > 0)
19831 sri->extra_cost = extra_cost;
19832 sri->icode = icode;
19837 /* Handle unaligned loads and stores of integer registers. */
19838 if (!done_p && TARGET_POWERPC64
19839 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19840 && memory_p
19841 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19843 rtx addr = XEXP (x, 0);
19844 rtx off = address_offset (addr);
19846 if (off != NULL_RTX)
19848 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19849 unsigned HOST_WIDE_INT offset = INTVAL (off);
19851 /* We need a secondary reload when our legitimate_address_p
19852 says the address is good (as otherwise the entire address
19853 will be reloaded), and the offset is not a multiple of
19854 four or we have an address wrap. Address wrap will only
19855 occur for LO_SUMs since legitimate_offset_address_p
19856 rejects addresses for 16-byte mems that will wrap. */
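/* For example, a TImode access (EXTRA == 8) at a LO_SUM offset of
   0x7ffc gives ((0x7ffc ^ 0x8000) == 0xfffc) >= 0x10000 - 8: the
   second doubleword would sit at 0x8004, past the signed 16-bit
   displacement, so the address must be reloaded.  For DImode
   (EXTRA == 0) the wrap test can never fire, and only a misaligned
   offset forces the reload.  */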
19857 if (GET_CODE (addr) == LO_SUM
19858 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19859 && ((offset & 3) != 0
19860 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19861 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19862 && (offset & 3) != 0))
19864 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19865 if (in_p)
19866 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19867 : CODE_FOR_reload_di_load);
19868 else
19869 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19870 : CODE_FOR_reload_di_store);
19871 sri->extra_cost = 2;
19872 ret = NO_REGS;
19873 done_p = true;
19875 else
19876 default_p = true;
19878 else
19879 default_p = true;
19882 if (!done_p && !TARGET_POWERPC64
19883 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19884 && memory_p
19885 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19887 rtx addr = XEXP (x, 0);
19888 rtx off = address_offset (addr);
19890 if (off != NULL_RTX)
19892 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19893 unsigned HOST_WIDE_INT offset = INTVAL (off);
19895 /* We need a secondary reload when our legitimate_address_p
19896 says the address is good (as otherwise the entire address
19897 will be reloaded), and we have a wrap.
19899 legitimate_lo_sum_address_p allows LO_SUM addresses to
19900 have any offset so test for wrap in the low 16 bits.
19902 legitimate_offset_address_p checks for the range
19903 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19904 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19905 [0x7ff4,0x7fff] respectively, so test for the
19906 intersection of these ranges, [0x7ffc,0x7fff] and
19907 [0x7ff4,0x7ff7] respectively.
19909 Note that the address we see here may have been
19910 manipulated by legitimize_reload_address. */
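/* For example, with a mode size of 8 (EXTRA == 4) the test
   offset - (0x8000 - 4) < 4 accepts exactly the offsets
   0x7ffc..0x7fff, and with a mode size of 16 (EXTRA == 12)
   offset - 0x7ff4 < 4 accepts 0x7ff4..0x7ff7, matching the
   intersected ranges described above.  */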
19911 if (GET_CODE (addr) == LO_SUM
19912 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19913 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19915 if (in_p)
19916 sri->icode = CODE_FOR_reload_si_load;
19917 else
19918 sri->icode = CODE_FOR_reload_si_store;
19919 sri->extra_cost = 2;
19920 ret = NO_REGS;
19921 done_p = true;
19923 else
19924 default_p = true;
19926 else
19927 default_p = true;
19930 if (!done_p)
19931 default_p = true;
19933 if (default_p)
19934 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19936 gcc_assert (ret != ALL_REGS);
19938 if (TARGET_DEBUG_ADDR)
19940 fprintf (stderr,
19941 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19942 "mode = %s",
19943 reg_class_names[ret],
19944 in_p ? "true" : "false",
19945 reg_class_names[rclass],
19946 GET_MODE_NAME (mode));
19948 if (reload_completed)
19949 fputs (", after reload", stderr);
19951 if (!done_p)
19952 fputs (", done_p not set", stderr);
19954 if (default_p)
19955 fputs (", default secondary reload", stderr);
19957 if (sri->icode != CODE_FOR_nothing)
19958 fprintf (stderr, ", reload func = %s, extra cost = %d",
19959 insn_data[sri->icode].name, sri->extra_cost);
19961 else if (sri->extra_cost > 0)
19962 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19964 fputs ("\n", stderr);
19965 debug_rtx (x);
19968 return ret;
19971 /* Better tracing for rs6000_secondary_reload_inner. */
19973 static void
19974 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19975 bool store_p)
19977 rtx set, clobber;
19979 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19981 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19982 store_p ? "store" : "load");
19984 if (store_p)
19985 set = gen_rtx_SET (mem, reg);
19986 else
19987 set = gen_rtx_SET (reg, mem);
19989 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19990 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19993 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19994 ATTRIBUTE_NORETURN;
19996 static void
19997 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19998 bool store_p)
20000 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
20001 gcc_unreachable ();
20004 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
20005 reload helper functions. These were identified in
20006 rs6000_secondary_reload_memory, and if reload decided to use the secondary
20007 reload, it calls the insns:
20008 reload_<RELOAD:mode>_<P:mptrsize>_store
20009 reload_<RELOAD:mode>_<P:mptrsize>_load
20011 which in turn calls this function, to do whatever is necessary to create
20012 valid addresses. */
20014 void
20015 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
20017 int regno = true_regnum (reg);
20018 machine_mode mode = GET_MODE (reg);
20019 addr_mask_type addr_mask;
20020 rtx addr;
20021 rtx new_addr;
20022 rtx op_reg, op0, op1;
20023 rtx and_op;
20024 rtx cc_clobber;
20025 rtvec rv;
20027 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
20028 || !base_reg_operand (scratch, GET_MODE (scratch)))
20029 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20031 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
20032 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
20034 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
20035 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
20037 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
20038 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
20040 else
20041 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20043 /* Make sure the mode is valid in this register class. */
20044 if ((addr_mask & RELOAD_REG_VALID) == 0)
20045 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20047 if (TARGET_DEBUG_ADDR)
20048 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
20050 new_addr = addr = XEXP (mem, 0);
20051 switch (GET_CODE (addr))
20053 /* Does the register class support auto update forms for this mode? If
20054 not, do the update now. We don't need a scratch register, since the
20055 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
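/* For example, if the class cannot auto-update, a (pre_dec (reg 1))
   store of a 16-byte vector becomes an explicit decrement of r1 by 16
   followed by a store through r1.  */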
20056 case PRE_INC:
20057 case PRE_DEC:
20058 op_reg = XEXP (addr, 0);
20059 if (!base_reg_operand (op_reg, Pmode))
20060 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20062 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
20064 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_CODE (addr) == PRE_DEC ? -GET_MODE_SIZE (mode) : GET_MODE_SIZE (mode))));
20065 new_addr = op_reg;
20067 break;
20069 case PRE_MODIFY:
20070 op0 = XEXP (addr, 0);
20071 op1 = XEXP (addr, 1);
20072 if (!base_reg_operand (op0, Pmode)
20073 || GET_CODE (op1) != PLUS
20074 || !rtx_equal_p (op0, XEXP (op1, 0)))
20075 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20077 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
20079 emit_insn (gen_rtx_SET (op0, op1));
20080 new_addr = op0;
20082 break;
20084 /* Do we need to simulate AND -16 to clear the bottom address bits used
20085 in VMX load/stores? */
20086 case AND:
20087 op0 = XEXP (addr, 0);
20088 op1 = XEXP (addr, 1);
20089 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
20091 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
20092 op_reg = op0;
20094 else if (GET_CODE (op1) == PLUS)
20096 emit_insn (gen_rtx_SET (scratch, op1));
20097 op_reg = scratch;
20100 else
20101 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20103 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
20104 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
20105 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
20106 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
20107 new_addr = scratch;
20109 break;
20111 /* If this is an indirect address, make sure it is a base register. */
20112 case REG:
20113 case SUBREG:
20114 if (!base_reg_operand (addr, GET_MODE (addr)))
20116 emit_insn (gen_rtx_SET (scratch, addr));
20117 new_addr = scratch;
20119 break;
20121 /* If this is an indexed address, make sure the register class can handle
20122 indexed addresses for this mode. */
20123 case PLUS:
20124 op0 = XEXP (addr, 0);
20125 op1 = XEXP (addr, 1);
20126 if (!base_reg_operand (op0, Pmode))
20127 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20129 else if (int_reg_operand (op1, Pmode))
20131 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20133 emit_insn (gen_rtx_SET (scratch, addr));
20134 new_addr = scratch;
20138 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
20140 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
20141 || !quad_address_p (addr, mode, false))
20143 emit_insn (gen_rtx_SET (scratch, addr));
20144 new_addr = scratch;
20148 /* Make sure the register class can handle offset addresses. */
20149 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
20151 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20153 emit_insn (gen_rtx_SET (scratch, addr));
20154 new_addr = scratch;
20158 else
20159 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20161 break;
20163 case LO_SUM:
20164 op0 = XEXP (addr, 0);
20165 op1 = XEXP (addr, 1);
20166 if (!base_reg_operand (op0, Pmode))
20167 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20169 else if (int_reg_operand (op1, Pmode))
20171 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20173 emit_insn (gen_rtx_SET (scratch, addr));
20174 new_addr = scratch;
20178 /* Quad offsets are restricted and can't handle normal addresses. */
20179 else if (mode_supports_vsx_dform_quad (mode))
20181 emit_insn (gen_rtx_SET (scratch, addr));
20182 new_addr = scratch;
20185 /* Make sure the register class can handle offset addresses. */
20186 else if (legitimate_lo_sum_address_p (mode, addr, false))
20188 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20190 emit_insn (gen_rtx_SET (scratch, addr));
20191 new_addr = scratch;
20195 else
20196 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20198 break;
20200 case SYMBOL_REF:
20201 case CONST:
20202 case LABEL_REF:
20203 rs6000_emit_move (scratch, addr, Pmode);
20204 new_addr = scratch;
20205 break;
20207 default:
20208 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20211 /* Adjust the address if it changed. */
20212 if (addr != new_addr)
20214 mem = replace_equiv_address_nv (mem, new_addr);
20215 if (TARGET_DEBUG_ADDR)
20216 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20219 /* Now create the move. */
20220 if (store_p)
20221 emit_insn (gen_rtx_SET (mem, reg));
20222 else
20223 emit_insn (gen_rtx_SET (reg, mem));
20225 return;
20228 /* Convert reloads involving 64-bit gprs and misaligned offset
20229 addressing, or multiple 32-bit gprs and offsets that are too large,
20230 to use indirect addressing. */
20232 void
20233 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20235 int regno = true_regnum (reg);
20236 enum reg_class rclass;
20237 rtx addr;
20238 rtx scratch_or_premodify = scratch;
20240 if (TARGET_DEBUG_ADDR)
20242 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20243 store_p ? "store" : "load");
20244 fprintf (stderr, "reg:\n");
20245 debug_rtx (reg);
20246 fprintf (stderr, "mem:\n");
20247 debug_rtx (mem);
20248 fprintf (stderr, "scratch:\n");
20249 debug_rtx (scratch);
20252 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
20253 gcc_assert (GET_CODE (mem) == MEM);
20254 rclass = REGNO_REG_CLASS (regno);
20255 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20256 addr = XEXP (mem, 0);
20258 if (GET_CODE (addr) == PRE_MODIFY)
20260 gcc_assert (REG_P (XEXP (addr, 0))
20261 && GET_CODE (XEXP (addr, 1)) == PLUS
20262 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20263 scratch_or_premodify = XEXP (addr, 0);
20264 if (!HARD_REGISTER_P (scratch_or_premodify))
20265 /* If we have a pseudo here then reload will have arranged
20266 to have it replaced, but only in the original insn.
20267 Use the replacement here too. */
20268 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
20270 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
20271 expressions from the original insn, without unsharing them.
20272 Any RTL that points into the original insn will of course
20273 have register replacements applied. That is why we don't
20274 need to look for replacements under the PLUS. */
20275 addr = XEXP (addr, 1);
20277 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20279 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20281 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20283 /* Now create the move. */
20284 if (store_p)
20285 emit_insn (gen_rtx_SET (mem, reg));
20286 else
20287 emit_insn (gen_rtx_SET (reg, mem));
20289 return;
20292 /* Given an rtx X being reloaded into a reg required to be
20293 in class CLASS, return the class of reg to actually use.
20294 In general this is just CLASS; but on some machines
20295 in some cases it is preferable to use a more restrictive class.
20297 On the RS/6000, we have to return NO_REGS when we want to reload a
20298 floating-point CONST_DOUBLE to force it to be copied to memory.
20300 We also don't want to reload integer values into floating-point
20301 registers if we can at all help it. In fact, this can
20302 cause reload to die, if it tries to generate a reload of CTR
20303 into a FP register and discovers it doesn't have the memory location
20304 required.
20306 ??? Would it be a good idea to have reload do the converse, that is
20307 try to reload floating modes into FP registers if possible?
20310 static enum reg_class
20311 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20313 machine_mode mode = GET_MODE (x);
20314 bool is_constant = CONSTANT_P (x);
20316 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20317 reload class for it. */
20318 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20319 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20320 return NO_REGS;
20322 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20323 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20324 return NO_REGS;
20326 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20327 the reloading of address expressions using PLUS into floating point
20328 registers. */
20329 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20331 if (is_constant)
20333 /* Zero is always allowed in all VSX registers. */
20334 if (x == CONST0_RTX (mode))
20335 return rclass;
20337 /* If this is a vector constant that can be formed with a few Altivec
20338 instructions, we want altivec registers. */
20339 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20340 return ALTIVEC_REGS;
20342 /* If this is an integer constant that can easily be loaded into
20343 vector registers, allow it. */
20344 if (CONST_INT_P (x))
20346 HOST_WIDE_INT value = INTVAL (x);
20348 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20349 2.06 can generate it in the Altivec registers with
20350 VSPLTI<x>. */
20351 if (value == -1)
20353 if (TARGET_P8_VECTOR)
20354 return rclass;
20355 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20356 return ALTIVEC_REGS;
20357 else
20358 return NO_REGS;
20361 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20362 a sign extend in the Altivec registers. */
20363 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20364 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20365 return ALTIVEC_REGS;
20368 /* Force constant to memory. */
20369 return NO_REGS;
20372 /* D-form addressing can easily reload the value. */
20373 if (mode_supports_vmx_dform (mode)
20374 || mode_supports_vsx_dform_quad (mode))
20375 return rclass;
20377 /* If this is a scalar floating point value and we don't have D-form
20378 addressing, prefer the traditional floating point registers so that we
20379 can use D-form (register+offset) addressing. */
20380 if (rclass == VSX_REGS
20381 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20382 return FLOAT_REGS;
20384 /* Prefer the Altivec registers if Altivec is handling the vector
20385 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20386 loads. */
20387 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20388 || mode == V1TImode)
20389 return ALTIVEC_REGS;
20391 return rclass;
20394 if (is_constant || GET_CODE (x) == PLUS)
20396 if (reg_class_subset_p (GENERAL_REGS, rclass))
20397 return GENERAL_REGS;
20398 if (reg_class_subset_p (BASE_REGS, rclass))
20399 return BASE_REGS;
20400 return NO_REGS;
20403 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20404 return GENERAL_REGS;
20406 return rclass;
20409 /* Debug version of rs6000_preferred_reload_class. */
20410 static enum reg_class
20411 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20413 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20415 fprintf (stderr,
20416 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20417 "mode = %s, x:\n",
20418 reg_class_names[ret], reg_class_names[rclass],
20419 GET_MODE_NAME (GET_MODE (x)));
20420 debug_rtx (x);
20422 return ret;
20425 /* If we are copying between FP or AltiVec registers and anything else, we need
20426 a memory location. The exception is when we are targeting ppc64 and the
20427 instructions to move between FPRs and GPRs are available. Also, under VSX, you
20428 can copy vector registers from the FP register set to the Altivec register
20429 set and vice versa. */
20431 static bool
20432 rs6000_secondary_memory_needed (enum reg_class from_class,
20433 enum reg_class to_class,
20434 machine_mode mode)
20436 enum rs6000_reg_type from_type, to_type;
20437 bool altivec_p = ((from_class == ALTIVEC_REGS)
20438 || (to_class == ALTIVEC_REGS));
20440 /* If a simple/direct move is available, we don't need secondary memory. */
20441 from_type = reg_class_to_reg_type[(int)from_class];
20442 to_type = reg_class_to_reg_type[(int)to_class];
20444 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20445 (secondary_reload_info *)0, altivec_p))
20446 return false;
20448 /* If we have a floating point or vector register class, we need to use
20449 memory to transfer the data. */
20450 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20451 return true;
20453 return false;
20456 /* Debug version of rs6000_secondary_memory_needed. */
20457 static bool
20458 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
20459 enum reg_class to_class,
20460 machine_mode mode)
20462 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
20464 fprintf (stderr,
20465 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20466 "to_class = %s, mode = %s\n",
20467 ret ? "true" : "false",
20468 reg_class_names[from_class],
20469 reg_class_names[to_class],
20470 GET_MODE_NAME (mode));
20472 return ret;
20475 /* Return the register class of a scratch register needed to copy IN into
20476 or out of a register in RCLASS in MODE. If it can be done directly,
20477 NO_REGS is returned. */
20479 static enum reg_class
20480 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20481 rtx in)
20483 int regno;
20485 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20486 #if TARGET_MACHO
20487 && MACHOPIC_INDIRECT
20488 #endif
20491 /* We cannot copy a symbolic operand directly into anything
20492 other than BASE_REGS for TARGET_ELF. So indicate that a
20493 register from BASE_REGS is needed as an intermediate
20494 register.
20496 On Darwin, pic addresses require a load from memory, which
20497 needs a base register. */
20498 if (rclass != BASE_REGS
20499 && (GET_CODE (in) == SYMBOL_REF
20500 || GET_CODE (in) == HIGH
20501 || GET_CODE (in) == LABEL_REF
20502 || GET_CODE (in) == CONST))
20503 return BASE_REGS;
20506 if (GET_CODE (in) == REG)
20508 regno = REGNO (in);
20509 if (regno >= FIRST_PSEUDO_REGISTER)
20511 regno = true_regnum (in);
20512 if (regno >= FIRST_PSEUDO_REGISTER)
20513 regno = -1;
20516 else if (GET_CODE (in) == SUBREG)
20518 regno = true_regnum (in);
20519 if (regno >= FIRST_PSEUDO_REGISTER)
20520 regno = -1;
20522 else
20523 regno = -1;
20525 /* If we have VSX register moves, prefer moving scalar values between
20526 Altivec registers and GPRs by going via an FPR (and then via memory)
20527 instead of reloading the secondary memory address for Altivec moves. */
20528 if (TARGET_VSX
20529 && GET_MODE_SIZE (mode) < 16
20530 && !mode_supports_vmx_dform (mode)
20531 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20532 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20533 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20534 && (regno >= 0 && INT_REGNO_P (regno)))))
20535 return FLOAT_REGS;
20537 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20538 into anything. */
20539 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20540 || (regno >= 0 && INT_REGNO_P (regno)))
20541 return NO_REGS;
20543 /* Constants, memory, and VSX registers can go into VSX registers (both the
20544 traditional floating point and the altivec registers). */
20545 if (rclass == VSX_REGS
20546 && (regno == -1 || VSX_REGNO_P (regno)))
20547 return NO_REGS;
20549 /* Constants, memory, and FP registers can go into FP registers. */
20550 if ((regno == -1 || FP_REGNO_P (regno))
20551 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20552 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20554 /* Memory and AltiVec registers can go into AltiVec registers. */
20555 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20556 && rclass == ALTIVEC_REGS)
20557 return NO_REGS;
20559 /* We can copy among the CR registers. */
20560 if ((rclass == CR_REGS || rclass == CR0_REGS)
20561 && regno >= 0 && CR_REGNO_P (regno))
20562 return NO_REGS;
20564 /* Otherwise, we need GENERAL_REGS. */
20565 return GENERAL_REGS;
20568 /* Debug version of rs6000_secondary_reload_class. */
20569 static enum reg_class
20570 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20571 machine_mode mode, rtx in)
20573 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20574 fprintf (stderr,
20575 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20576 "mode = %s, input rtx:\n",
20577 reg_class_names[ret], reg_class_names[rclass],
20578 GET_MODE_NAME (mode));
20579 debug_rtx (in);
20581 return ret;
20584 /* Return nonzero if for RCLASS a mode change from FROM to TO is invalid. */
20586 static bool
20587 rs6000_cannot_change_mode_class (machine_mode from,
20588 machine_mode to,
20589 enum reg_class rclass)
20591 unsigned from_size = GET_MODE_SIZE (from);
20592 unsigned to_size = GET_MODE_SIZE (to);
20594 if (from_size != to_size)
20596 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20598 if (reg_classes_intersect_p (xclass, rclass))
20600 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
20601 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
20602 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20603 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20605 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20606 single register under VSX because the scalar part of the register
20607 is in the upper 64 bits, and not the lower 64 bits. Types like
20608 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20609 IEEE floating point can't overlap, and neither can small
20610 values. */
20612 if (to_float128_vector_p && from_float128_vector_p)
20613 return false;
20615 else if (to_float128_vector_p || from_float128_vector_p)
20616 return true;
20618 /* TDmode in floating-mode registers must always go into a register
20619 pair with the most significant word in the even-numbered register
20620 to match ISA requirements. In little-endian mode, this does not
20621 match subreg numbering, so we cannot allow subregs. */
20622 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20623 return true;
20625 if (from_size < 8 || to_size < 8)
20626 return true;
20628 if (from_size == 8 && (8 * to_nregs) != to_size)
20629 return true;
20631 if (to_size == 8 && (8 * from_nregs) != from_size)
20632 return true;
20634 return false;
20636 else
20637 return false;
20640 /* Since the VSX register set includes traditional floating point registers
20641 and altivec registers, just check for the size being different instead of
20642 trying to check whether the modes are vector modes. Otherwise it won't
20643 allow say DF and DI to change classes. For types like TFmode and TDmode
20644 that take 2 64-bit registers, rather than a single 128-bit register, don't
20645 allow subregs of those types to other 128-bit types. */
20646 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20648 unsigned num_regs = (from_size + 15) / 16;
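/* I.e. round the size up to the number of full 128-bit registers the
   value occupies: an 8- or 16-byte value needs one VSX register, a
   32-byte value would need two.  */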
20649 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
20650 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
20651 return true;
20653 return (from_size != 8 && from_size != 16);
20656 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20657 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20658 return true;
20660 return false;
20663 /* Debug version of rs6000_cannot_change_mode_class. */
20664 static bool
20665 rs6000_debug_cannot_change_mode_class (machine_mode from,
20666 machine_mode to,
20667 enum reg_class rclass)
20669 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
20671 fprintf (stderr,
20672 "rs6000_cannot_change_mode_class, return %s, from = %s, "
20673 "to = %s, rclass = %s\n",
20674 ret ? "true" : "false",
20675 GET_MODE_NAME (from), GET_MODE_NAME (to),
20676 reg_class_names[rclass]);
20678 return ret;
20681 /* Return a string to do a move operation of 128 bits of data. */
20683 const char *
20684 rs6000_output_move_128bit (rtx operands[])
20686 rtx dest = operands[0];
20687 rtx src = operands[1];
20688 machine_mode mode = GET_MODE (dest);
20689 int dest_regno;
20690 int src_regno;
20691 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20692 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20694 if (REG_P (dest))
20696 dest_regno = REGNO (dest);
20697 dest_gpr_p = INT_REGNO_P (dest_regno);
20698 dest_fp_p = FP_REGNO_P (dest_regno);
20699 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20700 dest_vsx_p = dest_fp_p | dest_vmx_p;
20702 else
20704 dest_regno = -1;
20705 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20708 if (REG_P (src))
20710 src_regno = REGNO (src);
20711 src_gpr_p = INT_REGNO_P (src_regno);
20712 src_fp_p = FP_REGNO_P (src_regno);
20713 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20714 src_vsx_p = src_fp_p | src_vmx_p;
20716 else
20718 src_regno = -1;
20719 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20722 /* Register moves. */
20723 if (dest_regno >= 0 && src_regno >= 0)
20725 if (dest_gpr_p)
20727 if (src_gpr_p)
20728 return "#";
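/* A 128-bit VSX value spans the two 64-bit halves of the register;
   mfvsrd fetches the most significant doubleword and mfvsrld the
   least significant one, so the order of the GPR pair below depends
   on endianness.  */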
20730 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20731 return (WORDS_BIG_ENDIAN
20732 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20733 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20735 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20736 return "#";
20739 else if (TARGET_VSX && dest_vsx_p)
20741 if (src_vsx_p)
20742 return "xxlor %x0,%x1,%x1";
20744 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20745 return (WORDS_BIG_ENDIAN
20746 ? "mtvsrdd %x0,%1,%L1"
20747 : "mtvsrdd %x0,%L1,%1");
20749 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20750 return "#";
20753 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20754 return "vor %0,%1,%1";
20756 else if (dest_fp_p && src_fp_p)
20757 return "#";
20760 /* Loads. */
20761 else if (dest_regno >= 0 && MEM_P (src))
20763 if (dest_gpr_p)
20765 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20766 return "lq %0,%1";
20767 else
20768 return "#";
20771 else if (TARGET_ALTIVEC && dest_vmx_p
20772 && altivec_indexed_or_indirect_operand (src, mode))
20773 return "lvx %0,%y1";
20775 else if (TARGET_VSX && dest_vsx_p)
20777 if (mode_supports_vsx_dform_quad (mode)
20778 && quad_address_p (XEXP (src, 0), mode, true))
20779 return "lxv %x0,%1";
20781 else if (TARGET_P9_VECTOR)
20782 return "lxvx %x0,%y1";
20784 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20785 return "lxvw4x %x0,%y1";
20787 else
20788 return "lxvd2x %x0,%y1";
20791 else if (TARGET_ALTIVEC && dest_vmx_p)
20792 return "lvx %0,%y1";
20794 else if (dest_fp_p)
20795 return "#";
20798 /* Stores. */
20799 else if (src_regno >= 0 && MEM_P (dest))
20801 if (src_gpr_p)
20803 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20804 return "stq %1,%0";
20805 else
20806 return "#";
20809 else if (TARGET_ALTIVEC && src_vmx_p
20810 && altivec_indexed_or_indirect_operand (dest, mode))
20811 return "stvx %1,%y0";
20813 else if (TARGET_VSX && src_vsx_p)
20815 if (mode_supports_vsx_dform_quad (mode)
20816 && quad_address_p (XEXP (dest, 0), mode, true))
20817 return "stxv %x1,%0";
20819 else if (TARGET_P9_VECTOR)
20820 return "stxvx %x1,%y0";
20822 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20823 return "stxvw4x %x1,%y0";
20825 else
20826 return "stxvd2x %x1,%y0";
20829 else if (TARGET_ALTIVEC && src_vmx_p)
20830 return "stvx %1,%y0";
20832 else if (src_fp_p)
20833 return "#";
20836 /* Constants. */
20837 else if (dest_regno >= 0
20838 && (GET_CODE (src) == CONST_INT
20839 || GET_CODE (src) == CONST_WIDE_INT
20840 || GET_CODE (src) == CONST_DOUBLE
20841 || GET_CODE (src) == CONST_VECTOR))
20843 if (dest_gpr_p)
20844 return "#";
20846 else if ((dest_vmx_p && TARGET_ALTIVEC)
20847 || (dest_vsx_p && TARGET_VSX))
20848 return output_vec_const_move (operands);
20851 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20854 /* Validate a 128-bit move. */
20855 bool
20856 rs6000_move_128bit_ok_p (rtx operands[])
20858 machine_mode mode = GET_MODE (operands[0]);
20859 return (gpc_reg_operand (operands[0], mode)
20860 || gpc_reg_operand (operands[1], mode));
20863 /* Return true if a 128-bit move needs to be split. */
20864 bool
20865 rs6000_split_128bit_ok_p (rtx operands[])
20867 if (!reload_completed)
20868 return false;
20870 if (!gpr_or_gpr_p (operands[0], operands[1]))
20871 return false;
20873 if (quad_load_store_p (operands[0], operands[1]))
20874 return false;
20876 return true;
20880 /* Given a comparison operation, return the bit number in CCR to test. We
20881 know this is a valid comparison.
20883 SCC_P is 1 if this is for an scc. That means that %D will have been
20884 used instead of %C, so the bits will be in different places.
20886 Return -1 if OP isn't a valid comparison for some reason. */
20889 ccr_bit (rtx op, int scc_p)
20891 enum rtx_code code = GET_CODE (op);
20892 machine_mode cc_mode;
20893 int cc_regnum;
20894 int base_bit;
20895 rtx reg;
20897 if (!COMPARISON_P (op))
20898 return -1;
20900 reg = XEXP (op, 0);
20902 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20904 cc_mode = GET_MODE (reg);
20905 cc_regnum = REGNO (reg);
20906 base_bit = 4 * (cc_regnum - CR0_REGNO);
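/* Each CR field is 4 bits wide: bit 0 is LT, bit 1 GT, bit 2 EQ and
   bit 3 SO/UN.  So, for example, an EQ test of CR2 uses bit
   4 * 2 + 2 == 10.  */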
20908 validate_condition_mode (code, cc_mode);
20910 /* When generating a sCOND operation, only positive conditions are
20911 allowed. */
20912 gcc_assert (!scc_p
20913 || code == EQ || code == GT || code == LT || code == UNORDERED
20914 || code == GTU || code == LTU);
20916 switch (code)
20918 case NE:
20919 return scc_p ? base_bit + 3 : base_bit + 2;
20920 case EQ:
20921 return base_bit + 2;
20922 case GT: case GTU: case UNLE:
20923 return base_bit + 1;
20924 case LT: case LTU: case UNGE:
20925 return base_bit;
20926 case ORDERED: case UNORDERED:
20927 return base_bit + 3;
20929 case GE: case GEU:
20930 /* If scc, we will have done a cror to put the bit in the
20931 unordered position. So test that bit. For integer, this is ! LT
20932 unless this is an scc insn. */
20933 return scc_p ? base_bit + 3 : base_bit;
20935 case LE: case LEU:
20936 return scc_p ? base_bit + 3 : base_bit + 1;
20938 default:
20939 gcc_unreachable ();
20943 /* Return the GOT register. */
20946 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20948 /* The second flow pass currently (June 1999) can't update
20949 regs_ever_live without disturbing other parts of the compiler, so
20950 update it here to make the prolog/epilogue code happy. */
20951 if (!can_create_pseudo_p ()
20952 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20953 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20955 crtl->uses_pic_offset_table = 1;
20957 return pic_offset_table_rtx;
20960 static rs6000_stack_t stack_info;
20962 /* Function to init struct machine_function.
20963 This will be called, via a pointer variable,
20964 from push_function_context. */
20966 static struct machine_function *
20967 rs6000_init_machine_status (void)
20969 stack_info.reload_completed = 0;
20970 return ggc_cleared_alloc<machine_function> ();
20973 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20975 /* Write out a function code label. */
20977 void
20978 rs6000_output_function_entry (FILE *file, const char *fname)
20980 if (fname[0] != '.')
20982 switch (DEFAULT_ABI)
20984 default:
20985 gcc_unreachable ();
20987 case ABI_AIX:
20988 if (DOT_SYMBOLS)
20989 putc ('.', file);
20990 else
20991 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20992 break;
20994 case ABI_ELFv2:
20995 case ABI_V4:
20996 case ABI_DARWIN:
20997 break;
21001 RS6000_OUTPUT_BASENAME (file, fname);
21004 /* Print an operand. Recognize special options, documented below. */
21006 #if TARGET_ELF
21007 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21008 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21009 #else
21010 #define SMALL_DATA_RELOC "sda21"
21011 #define SMALL_DATA_REG 0
21012 #endif
21014 void
21015 print_operand (FILE *file, rtx x, int code)
21017 int i;
21018 unsigned HOST_WIDE_INT uval;
21020 switch (code)
21022 /* %a is output_address. */
21024 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21025 output_operand. */
21027 case 'D':
21028 /* Like 'J' but get to the GT bit only. */
21029 gcc_assert (REG_P (x));
21031 /* Bit 1 is GT bit. */
21032 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
21034 /* Add one for shift count in rlinm for scc. */
21035 fprintf (file, "%d", i + 1);
21036 return;
21038 case 'e':
21039 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21040 if (! INT_P (x))
21042 output_operand_lossage ("invalid %%e value");
21043 return;
21046 uval = INTVAL (x);
21047 if ((uval & 0xffff) == 0 && uval != 0)
21048 putc ('s', file);
21049 return;
21051 case 'E':
21052 /* X is a CR register. Print the number of the EQ bit of the CR. */
21053 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21054 output_operand_lossage ("invalid %%E value");
21055 else
21056 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
21057 return;
21059 case 'f':
21060 /* X is a CR register. Print the shift count needed to move it
21061 to the high-order four bits. */
21062 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21063 output_operand_lossage ("invalid %%f value");
21064 else
21065 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
21066 return;
21068 case 'F':
21069 /* Similar, but print the count for the rotate in the opposite
21070 direction. */
21071 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21072 output_operand_lossage ("invalid %%F value");
21073 else
21074 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
21075 return;
21077 case 'G':
21078 /* X is a constant integer. If it is negative, print "m",
21079 otherwise print "z". This is to make an aze or ame insn. */
21080 if (GET_CODE (x) != CONST_INT)
21081 output_operand_lossage ("invalid %%G value");
21082 else if (INTVAL (x) >= 0)
21083 putc ('z', file);
21084 else
21085 putc ('m', file);
21086 return;
21088 case 'h':
21089 /* If constant, output low-order five bits. Otherwise, write
21090 normally. */
21091 if (INT_P (x))
21092 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
21093 else
21094 print_operand (file, x, 0);
21095 return;
21097 case 'H':
21098 /* If constant, output low-order six bits. Otherwise, write
21099 normally. */
21100 if (INT_P (x))
21101 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
21102 else
21103 print_operand (file, x, 0);
21104 return;
21106 case 'I':
21107 /* Print `i' if this is a constant, else nothing. */
21108 if (INT_P (x))
21109 putc ('i', file);
21110 return;
21112 case 'j':
21113 /* Write the bit number in CCR for jump. */
21114 i = ccr_bit (x, 0);
21115 if (i == -1)
21116 output_operand_lossage ("invalid %%j code");
21117 else
21118 fprintf (file, "%d", i);
21119 return;
21121 case 'J':
21122 /* Similar, but add one for shift count in rlinm for scc and pass
21123 scc flag to `ccr_bit'. */
21124 i = ccr_bit (x, 1);
21125 if (i == -1)
21126 output_operand_lossage ("invalid %%J code");
21127 else
21128 /* If we want bit 31, write a shift count of zero, not 32. */
21129 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21130 return;
21132 case 'k':
21133 /* X must be a constant. Write the 1's complement of the
21134 constant. */
21135 if (! INT_P (x))
21136 output_operand_lossage ("invalid %%k value");
21137 else
21138 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
21139 return;
21141 case 'K':
21142 /* X must be a symbolic constant on ELF. Write an
21143 expression suitable for an 'addi' that adds in the low 16
21144 bits of the MEM. */
21145 if (GET_CODE (x) == CONST)
21147 if (GET_CODE (XEXP (x, 0)) != PLUS
21148 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
21149 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
21150 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
21151 output_operand_lossage ("invalid %%K value");
21153 print_operand_address (file, x);
21154 fputs ("@l", file);
21155 return;
21157 /* %l is output_asm_label. */
21159 case 'L':
21160 /* Write second word of DImode or DFmode reference. Works on register
21161 or non-indexed memory only. */
21162 if (REG_P (x))
21163 fputs (reg_names[REGNO (x) + 1], file);
21164 else if (MEM_P (x))
21166 machine_mode mode = GET_MODE (x);
21167 /* Handle possible auto-increment. Since it is pre-increment and
21168 we have already done it, we can just use an offset of one word. */
21169 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21170 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21171 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21172 UNITS_PER_WORD));
21173 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21174 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21175 UNITS_PER_WORD));
21176 else
21177 output_address (mode, XEXP (adjust_address_nv (x, SImode,
21178 UNITS_PER_WORD),
21179 0));
21181 if (small_data_operand (x, GET_MODE (x)))
21182 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21183 reg_names[SMALL_DATA_REG]);
21185 return;
21187 case 'N':
21188 /* Write the number of elements in the vector times 4. */
21189 if (GET_CODE (x) != PARALLEL)
21190 output_operand_lossage ("invalid %%N value");
21191 else
21192 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21193 return;
21195 case 'O':
21196 /* Similar, but subtract 1 first. */
21197 if (GET_CODE (x) != PARALLEL)
21198 output_operand_lossage ("invalid %%O value");
21199 else
21200 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21201 return;
21203 case 'p':
21204 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21205 if (! INT_P (x)
21206 || INTVAL (x) < 0
21207 || (i = exact_log2 (INTVAL (x))) < 0)
21208 output_operand_lossage ("invalid %%p value");
21209 else
21210 fprintf (file, "%d", i);
21211 return;
21213 case 'P':
21214 /* The operand must be an indirect memory reference. The result
21215 is the register name. */
21216 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
21217 || REGNO (XEXP (x, 0)) >= 32)
21218 output_operand_lossage ("invalid %%P value");
21219 else
21220 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21221 return;
21223 case 'q':
21224 /* This outputs the logical code corresponding to a boolean
21225 expression. The expression may have one or both operands
21226 negated (if one, only the first one). For condition register
21227 logical operations, it will also treat the negated
21228 CR codes as NOTs, but not handle NOTs of them. */
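/* For example, (and (not A) B) prints "andc", and for condition
   register logic (ior (not A) (not B)) prints "nand".  */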
21230 const char *const *t = 0;
21231 const char *s;
21232 enum rtx_code code = GET_CODE (x);
21233 static const char * const tbl[3][3] = {
21234 { "and", "andc", "nor" },
21235 { "or", "orc", "nand" },
21236 { "xor", "eqv", "xor" } };
21238 if (code == AND)
21239 t = tbl[0];
21240 else if (code == IOR)
21241 t = tbl[1];
21242 else if (code == XOR)
21243 t = tbl[2];
21244 else
21245 output_operand_lossage ("invalid %%q value");
21247 if (GET_CODE (XEXP (x, 0)) != NOT)
21248 s = t[0];
21249 else
21251 if (GET_CODE (XEXP (x, 1)) == NOT)
21252 s = t[2];
21253 else
21254 s = t[1];
21257 fputs (s, file);
21259 return;
21261 case 'Q':
21262 if (! TARGET_MFCRF)
21263 return;
21264 fputc (',', file);
21265 /* FALLTHRU */
21267 case 'R':
21268 /* X is a CR register. Print the mask for `mtcrf'. */
21269 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21270 output_operand_lossage ("invalid %%R value");
21271 else
21272 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21273 return;
21275 case 's':
21276 /* Low 5 bits of 32 - value. */
21277 if (! INT_P (x))
21278 output_operand_lossage ("invalid %%s value");
21279 else
21280 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21281 return;
21283 case 't':
21284 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21285 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
21287 /* Bit 3 is OV bit. */
21288 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21290 /* If we want bit 31, write a shift count of zero, not 32. */
21291 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21292 return;
21294 case 'T':
21295 /* Print the symbolic name of a branch target register. */
21296 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21297 && REGNO (x) != CTR_REGNO))
21298 output_operand_lossage ("invalid %%T value");
21299 else if (REGNO (x) == LR_REGNO)
21300 fputs ("lr", file);
21301 else
21302 fputs ("ctr", file);
21303 return;
21305 case 'u':
21306 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21307 for use in unsigned operand. */
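/* For example, 0x56780000 is printed as 0x5678 (its high half),
   while a value with a non-zero low half such as 0x1234 is printed
   as 0x1234 unchanged.  */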
21308 if (! INT_P (x))
21310 output_operand_lossage ("invalid %%u value");
21311 return;
21314 uval = INTVAL (x);
21315 if ((uval & 0xffff) == 0)
21316 uval >>= 16;
21318 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21319 return;
21321 case 'v':
21322 /* High-order 16 bits of constant for use in signed operand. */
21323 if (! INT_P (x))
21324 output_operand_lossage ("invalid %%v value");
21325 else
21326 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21327 (INTVAL (x) >> 16) & 0xffff);
21328 return;
21330 case 'U':
21331 /* Print `u' if this has an auto-increment or auto-decrement. */
21332 if (MEM_P (x)
21333 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21334 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21335 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21336 putc ('u', file);
21337 return;
21339 case 'V':
21340 /* Print the trap code for this operand. */
21341 switch (GET_CODE (x))
21343 case EQ:
21344 fputs ("eq", file); /* 4 */
21345 break;
21346 case NE:
21347 fputs ("ne", file); /* 24 */
21348 break;
21349 case LT:
21350 fputs ("lt", file); /* 16 */
21351 break;
21352 case LE:
21353 fputs ("le", file); /* 20 */
21354 break;
21355 case GT:
21356 fputs ("gt", file); /* 8 */
21357 break;
21358 case GE:
21359 fputs ("ge", file); /* 12 */
21360 break;
21361 case LTU:
21362 fputs ("llt", file); /* 2 */
21363 break;
21364 case LEU:
21365 fputs ("lle", file); /* 6 */
21366 break;
21367 case GTU:
21368 fputs ("lgt", file); /* 1 */
21369 break;
21370 case GEU:
21371 fputs ("lge", file); /* 5 */
21372 break;
21373 default:
21374 gcc_unreachable ();
21376 break;
21378 case 'w':
21379 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21380 normally. */
21381 if (INT_P (x))
21382 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21383 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21384 else
21385 print_operand (file, x, 0);
21386 return;
21388 case 'x':
21389 /* X is a FPR or Altivec register used in a VSX context. */
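/* The FPRs overlay VSX registers 0-31 and the Altivec registers
   overlay VSX registers 32-63, hence the remapping of the hard
   register number below.  */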
21390 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21391 output_operand_lossage ("invalid %%x value");
21392 else
21394 int reg = REGNO (x);
21395 int vsx_reg = (FP_REGNO_P (reg)
21396 ? reg - 32
21397 : reg - FIRST_ALTIVEC_REGNO + 32);
21399 #ifdef TARGET_REGNAMES
21400 if (TARGET_REGNAMES)
21401 fprintf (file, "%%vs%d", vsx_reg);
21402 else
21403 #endif
21404 fprintf (file, "%d", vsx_reg);
21406 return;
21408 case 'X':
21409 if (MEM_P (x)
21410 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21411 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21412 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21413 putc ('x', file);
21414 return;
21416 case 'Y':
21417 /* Like 'L', for third word of TImode/PTImode */
21418 if (REG_P (x))
21419 fputs (reg_names[REGNO (x) + 2], file);
21420 else if (MEM_P (x))
21422 machine_mode mode = GET_MODE (x);
21423 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21424 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21425 output_address (mode, plus_constant (Pmode,
21426 XEXP (XEXP (x, 0), 0), 8));
21427 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21428 output_address (mode, plus_constant (Pmode,
21429 XEXP (XEXP (x, 0), 0), 8));
21430 else
21431 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21432 if (small_data_operand (x, GET_MODE (x)))
21433 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21434 reg_names[SMALL_DATA_REG]);
21436 return;
21438 case 'z':
21439 /* X is a SYMBOL_REF. Write out the name preceded by a
21440 period and without any trailing data in brackets. Used for function
21441 names. If we are configured for System V (or the embedded ABI) on
21442 the PowerPC, do not emit the period, since those systems do not use
21443 TOCs and the like. */
21444 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21446 /* For macho, check to see if we need a stub. */
21447 if (TARGET_MACHO)
21449 const char *name = XSTR (x, 0);
21450 #if TARGET_MACHO
21451 if (darwin_emit_branch_islands
21452 && MACHOPIC_INDIRECT
21453 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21454 name = machopic_indirection_name (x, /*stub_p=*/true);
21455 #endif
21456 assemble_name (file, name);
21458 else if (!DOT_SYMBOLS)
21459 assemble_name (file, XSTR (x, 0));
21460 else
21461 rs6000_output_function_entry (file, XSTR (x, 0));
21462 return;
21464 case 'Z':
21465 /* Like 'L', for last word of TImode/PTImode. */
21466 if (REG_P (x))
21467 fputs (reg_names[REGNO (x) + 3], file);
21468 else if (MEM_P (x))
21470 machine_mode mode = GET_MODE (x);
21471 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21472 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21473 output_address (mode, plus_constant (Pmode,
21474 XEXP (XEXP (x, 0), 0), 12));
21475 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21476 output_address (mode, plus_constant (Pmode,
21477 XEXP (XEXP (x, 0), 0), 12));
21478 else
21479 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21480 if (small_data_operand (x, GET_MODE (x)))
21481 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21482 reg_names[SMALL_DATA_REG]);
21484 return;
21486 /* Print AltiVec memory operand. */
21487 case 'y':
21489 rtx tmp;
21491 gcc_assert (MEM_P (x));
21493 tmp = XEXP (x, 0);
21495 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
21496 && GET_CODE (tmp) == AND
21497 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21498 && INTVAL (XEXP (tmp, 1)) == -16)
21499 tmp = XEXP (tmp, 0);
21500 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21501 && GET_CODE (tmp) == PRE_MODIFY)
21502 tmp = XEXP (tmp, 1);
21503 if (REG_P (tmp))
21504 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21505 else
21507 if (GET_CODE (tmp) != PLUS
21508 || !REG_P (XEXP (tmp, 0))
21509 || !REG_P (XEXP (tmp, 1)))
21511 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21512 break;
21515 if (REGNO (XEXP (tmp, 0)) == 0)
21516 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21517 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21518 else
21519 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21520 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21522 break;
21525 case 0:
21526 if (REG_P (x))
21527 fprintf (file, "%s", reg_names[REGNO (x)]);
21528 else if (MEM_P (x))
21530 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21531 know the width from the mode. */
21532 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21533 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21534 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21535 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21536 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21537 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21538 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21539 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21540 else
21541 output_address (GET_MODE (x), XEXP (x, 0));
21543 else
21545 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21546 /* This hack along with a corresponding hack in
21547 rs6000_output_addr_const_extra arranges to output addends
21548 where the assembler expects to find them. eg.
21549 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21550 without this hack would be output as "x@toc+4". We
21551 want "x+4@toc". */
21552 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21553 else
21554 output_addr_const (file, x);
21556 return;
21558 case '&':
21559 if (const char *name = get_some_local_dynamic_name ())
21560 assemble_name (file, name);
21561 else
21562 output_operand_lossage ("'%%&' used without any "
21563 "local dynamic TLS references");
21564 return;
21566 default:
21567 output_operand_lossage ("invalid %%xn code");
21571 /* Print the address of an operand. */
21573 void
21574 print_operand_address (FILE *file, rtx x)
21576 if (REG_P (x))
21577 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21578 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21579 || GET_CODE (x) == LABEL_REF)
21581 output_addr_const (file, x);
21582 if (small_data_operand (x, GET_MODE (x)))
21583 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21584 reg_names[SMALL_DATA_REG]);
21585 else
21586 gcc_assert (!TARGET_TOC);
21588 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21589 && REG_P (XEXP (x, 1)))
21591 if (REGNO (XEXP (x, 0)) == 0)
21592 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21593 reg_names[ REGNO (XEXP (x, 0)) ]);
21594 else
21595 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21596 reg_names[ REGNO (XEXP (x, 1)) ]);
21598 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21599 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21600 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21601 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21602 #if TARGET_MACHO
21603 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21604 && CONSTANT_P (XEXP (x, 1)))
21606 fprintf (file, "lo16(");
21607 output_addr_const (file, XEXP (x, 1));
21608 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21610 #endif
21611 #if TARGET_ELF
21612 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21613 && CONSTANT_P (XEXP (x, 1)))
21615 output_addr_const (file, XEXP (x, 1));
21616 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21618 #endif
21619 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21621 /* This hack along with a corresponding hack in
21622 rs6000_output_addr_const_extra arranges to output addends
21623 where the assembler expects to find them.  E.g.
21624 (lo_sum (reg 9)
21625 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21626 without this hack would be output as "x@toc+8@l(9)". We
21627 want "x+8@toc@l(9)". */
21628 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21629 if (GET_CODE (x) == LO_SUM)
21630 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21631 else
21632 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21634 else
21635 gcc_unreachable ();
21638 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21640 static bool
21641 rs6000_output_addr_const_extra (FILE *file, rtx x)
21643 if (GET_CODE (x) == UNSPEC)
21644 switch (XINT (x, 1))
21646 case UNSPEC_TOCREL:
21647 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21648 && REG_P (XVECEXP (x, 0, 1))
21649 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21650 output_addr_const (file, XVECEXP (x, 0, 0));
21651 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21653 if (INTVAL (tocrel_offset_oac) >= 0)
21654 fprintf (file, "+");
21655 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21657 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21659 putc ('-', file);
21660 assemble_name (file, toc_label_name);
21661 need_toc_init = 1;
21663 else if (TARGET_ELF)
21664 fputs ("@toc", file);
21665 return true;
21667 #if TARGET_MACHO
21668 case UNSPEC_MACHOPIC_OFFSET:
21669 output_addr_const (file, XVECEXP (x, 0, 0));
21670 putc ('-', file);
21671 machopic_output_function_base_name (file);
21672 return true;
21673 #endif
21675 return false;
21678 /* Target hook for assembling integer objects. The PowerPC version has
21679 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21680 is defined. It also needs to handle DI-mode objects on 64-bit
21681 targets. */
21683 static bool
21684 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21686 #ifdef RELOCATABLE_NEEDS_FIXUP
21687 /* Special handling for SI values. */
21688 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21690 static int recurse = 0;
21692 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21693 the .fixup section. Since the TOC section is already relocated, we
21694 don't need to mark it here. We used to skip the text section, but it
21695 should never be valid for relocated addresses to be placed in the text
21696 section. */
21697 if (DEFAULT_ABI == ABI_V4
21698 && (TARGET_RELOCATABLE || flag_pic > 1)
21699 && in_section != toc_section
21700 && !recurse
21701 && !CONST_SCALAR_INT_P (x)
21702 && CONSTANT_P (x))
21704 char buf[256];
21706 recurse = 1;
21707 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21708 fixuplabelno++;
21709 ASM_OUTPUT_LABEL (asm_out_file, buf);
21710 fprintf (asm_out_file, "\t.long\t(");
21711 output_addr_const (asm_out_file, x);
21712 fprintf (asm_out_file, ")@fixup\n");
21713 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21714 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21715 fprintf (asm_out_file, "\t.long\t");
21716 assemble_name (asm_out_file, buf);
21717 fprintf (asm_out_file, "\n\t.previous\n");
21718 recurse = 0;
21719 return true;
21721 /* Remove initial .'s to turn a -mcall-aixdesc function
21722 address into the address of the descriptor, not the function
21723 itself. */
21724 else if (GET_CODE (x) == SYMBOL_REF
21725 && XSTR (x, 0)[0] == '.'
21726 && DEFAULT_ABI == ABI_AIX)
21728 const char *name = XSTR (x, 0);
21729 while (*name == '.')
21730 name++;
21732 fprintf (asm_out_file, "\t.long\t%s\n", name);
21733 return true;
21736 #endif /* RELOCATABLE_NEEDS_FIXUP */
21737 return default_assemble_integer (x, size, aligned_p);
21740 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21741 /* Emit an assembler directive to set symbol visibility for DECL to
21742 VISIBILITY_TYPE. */
21744 static void
21745 rs6000_assemble_visibility (tree decl, int vis)
21747 if (TARGET_XCOFF)
21748 return;
21750 /* Functions need to have their entry point symbol visibility set as
21751 well as their descriptor symbol visibility. */
21752 if (DEFAULT_ABI == ABI_AIX
21753 && DOT_SYMBOLS
21754 && TREE_CODE (decl) == FUNCTION_DECL)
21756 static const char * const visibility_types[] = {
21757 NULL, "protected", "hidden", "internal"
21760 const char *name, *type;
21762 name = ((* targetm.strip_name_encoding)
21763 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21764 type = visibility_types[vis];
21766 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21767 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21769 else
21770 default_assemble_visibility (decl, vis);
21772 #endif
21774 enum rtx_code
21775 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21777 /* Reversal of FP compares takes care -- an ordered compare
21778 becomes an unordered compare and vice versa. */
21779 if (mode == CCFPmode
21780 && (!flag_finite_math_only
21781 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21782 || code == UNEQ || code == LTGT))
21783 return reverse_condition_maybe_unordered (code);
21784 else
21785 return reverse_condition (code);
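/* Illustrative note: with -fno-finite-math-only, reversing (lt) on a
   CCFPmode compare yields UNGE rather than GE, so that a NaN operand
   still takes the opposite branch.  */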
21788 /* Generate a compare for CODE. Return a brand-new rtx that
21789 represents the result of the compare. */
21791 static rtx
21792 rs6000_generate_compare (rtx cmp, machine_mode mode)
21794 machine_mode comp_mode;
21795 rtx compare_result;
21796 enum rtx_code code = GET_CODE (cmp);
21797 rtx op0 = XEXP (cmp, 0);
21798 rtx op1 = XEXP (cmp, 1);
21800 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21801 comp_mode = CCmode;
21802 else if (FLOAT_MODE_P (mode))
21803 comp_mode = CCFPmode;
21804 else if (code == GTU || code == LTU
21805 || code == GEU || code == LEU)
21806 comp_mode = CCUNSmode;
21807 else if ((code == EQ || code == NE)
21808 && unsigned_reg_p (op0)
21809 && (unsigned_reg_p (op1)
21810 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21811 /* These are unsigned values; perhaps there will be a later
21812 ordering compare that can be shared with this one. */
21813 comp_mode = CCUNSmode;
21814 else
21815 comp_mode = CCmode;
21817 /* If we have an unsigned compare, make sure we don't have a signed value as
21818 an immediate. */
21819 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21820 && INTVAL (op1) < 0)
21822 op0 = copy_rtx_if_shared (op0);
21823 op1 = force_reg (GET_MODE (op0), op1);
21824 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21827 /* First, the compare. */
21828 compare_result = gen_reg_rtx (comp_mode);
21830 /* IEEE 128-bit support in VSX registers when we do not have hardware
21831 support. */
21832 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21834 rtx libfunc = NULL_RTX;
21835 bool check_nan = false;
21836 rtx dest;
21838 switch (code)
21840 case EQ:
21841 case NE:
21842 libfunc = optab_libfunc (eq_optab, mode);
21843 break;
21845 case GT:
21846 case GE:
21847 libfunc = optab_libfunc (ge_optab, mode);
21848 break;
21850 case LT:
21851 case LE:
21852 libfunc = optab_libfunc (le_optab, mode);
21853 break;
21855 case UNORDERED:
21856 case ORDERED:
21857 libfunc = optab_libfunc (unord_optab, mode);
21858 code = (code == UNORDERED) ? NE : EQ;
21859 break;
21861 case UNGE:
21862 case UNGT:
21863 check_nan = true;
21864 libfunc = optab_libfunc (ge_optab, mode);
21865 code = (code == UNGE) ? GE : GT;
21866 break;
21868 case UNLE:
21869 case UNLT:
21870 check_nan = true;
21871 libfunc = optab_libfunc (le_optab, mode);
21872 code = (code == UNLE) ? LE : LT;
21873 break;
21875 case UNEQ:
21876 case LTGT:
21877 check_nan = true;
21878 libfunc = optab_libfunc (eq_optab, mode);
21879 code = (code == UNEQ) ? EQ : NE;
21880 break;
21882 default:
21883 gcc_unreachable ();
21886 gcc_assert (libfunc);
21888 if (!check_nan)
21889 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21890 SImode, op0, mode, op1, mode);
21892 /* The library signals an exception for signalling NaNs, so we need to
21893 handle isgreater, etc. by first checking isordered. */
21894 else
21896 rtx ne_rtx, normal_dest, unord_dest;
21897 rtx unord_func = optab_libfunc (unord_optab, mode);
21898 rtx join_label = gen_label_rtx ();
21899 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21900 rtx unord_cmp = gen_reg_rtx (comp_mode);
21903 /* Test for either value being a NaN. */
21904 gcc_assert (unord_func);
21905 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21906 SImode, op0, mode, op1, mode);
21908 /* Set value (1) if either value is a NaN, and jump to the join
21909 label. */
21910 dest = gen_reg_rtx (SImode);
21911 emit_move_insn (dest, const1_rtx);
21912 emit_insn (gen_rtx_SET (unord_cmp,
21913 gen_rtx_COMPARE (comp_mode, unord_dest,
21914 const0_rtx)));
21916 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21917 emit_jump_insn (gen_rtx_SET (pc_rtx,
21918 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21919 join_ref,
21920 pc_rtx)));
21922 /* Do the normal comparison, knowing that the values are not
21923 NaNs. */
21924 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21925 SImode, op0, mode, op1, mode);
21927 emit_insn (gen_cstoresi4 (dest,
21928 gen_rtx_fmt_ee (code, SImode, normal_dest,
21929 const0_rtx),
21930 normal_dest, const0_rtx));
21932 /* Join NaN and non-NaN paths.  Compare dest against 0. */
21933 emit_label (join_label);
21934 code = NE;
21937 emit_insn (gen_rtx_SET (compare_result,
21938 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21941 else
21943 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21944 CLOBBERs to match cmptf_internal2 pattern. */
21945 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21946 && FLOAT128_IBM_P (GET_MODE (op0))
21947 && TARGET_HARD_FLOAT)
21948 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21949 gen_rtvec (10,
21950 gen_rtx_SET (compare_result,
21951 gen_rtx_COMPARE (comp_mode, op0, op1)),
21952 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21953 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21954 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21955 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21956 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21957 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21958 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21959 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21960 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21961 else if (GET_CODE (op1) == UNSPEC
21962 && XINT (op1, 1) == UNSPEC_SP_TEST)
21964 rtx op1b = XVECEXP (op1, 0, 0);
21965 comp_mode = CCEQmode;
21966 compare_result = gen_reg_rtx (CCEQmode);
21967 if (TARGET_64BIT)
21968 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21969 else
21970 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21972 else
21973 emit_insn (gen_rtx_SET (compare_result,
21974 gen_rtx_COMPARE (comp_mode, op0, op1)));
21977 /* Some kinds of FP comparisons need an OR operation;
21978 under flag_finite_math_only we don't bother. */
21979 if (FLOAT_MODE_P (mode)
21980 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21981 && !flag_finite_math_only
21982 && (code == LE || code == GE
21983 || code == UNEQ || code == LTGT
21984 || code == UNGT || code == UNLT))
21986 enum rtx_code or1, or2;
21987 rtx or1_rtx, or2_rtx, compare2_rtx;
21988 rtx or_result = gen_reg_rtx (CCEQmode);
21990 switch (code)
21992 case LE: or1 = LT; or2 = EQ; break;
21993 case GE: or1 = GT; or2 = EQ; break;
21994 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21995 case LTGT: or1 = LT; or2 = GT; break;
21996 case UNGT: or1 = UNORDERED; or2 = GT; break;
21997 case UNLT: or1 = UNORDERED; or2 = LT; break;
21998 default: gcc_unreachable ();
22000 validate_condition_mode (or1, comp_mode);
22001 validate_condition_mode (or2, comp_mode);
22002 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22003 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22004 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22005 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22006 const_true_rtx);
22007 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22009 compare_result = or_result;
22010 code = EQ;
22013 validate_condition_mode (code, GET_MODE (compare_result));
22015 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
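/* Note: the rtx returned above is a comparison of the CC pseudo against
   zero, e.g. (gt (reg:CC 123) (const_int 0)); the register number is
   illustrative.  */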
22019 /* Return the diagnostic message string if the binary operation OP is
22020 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22022 static const char*
22023 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22024 const_tree type1,
22025 const_tree type2)
22027 machine_mode mode1 = TYPE_MODE (type1);
22028 machine_mode mode2 = TYPE_MODE (type2);
22030 /* For complex modes, use the inner type. */
22031 if (COMPLEX_MODE_P (mode1))
22032 mode1 = GET_MODE_INNER (mode1);
22034 if (COMPLEX_MODE_P (mode2))
22035 mode2 = GET_MODE_INNER (mode2);
22037 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22038 double to intermix unless -mfloat128-convert. */
22039 if (mode1 == mode2)
22040 return NULL;
22042 if (!TARGET_FLOAT128_CVT)
22044 if ((mode1 == KFmode && mode2 == IFmode)
22045 || (mode1 == IFmode && mode2 == KFmode))
22046 return N_("__float128 and __ibm128 cannot be used in the same "
22047 "expression");
22049 if (TARGET_IEEEQUAD
22050 && ((mode1 == IFmode && mode2 == TFmode)
22051 || (mode1 == TFmode && mode2 == IFmode)))
22052 return N_("__ibm128 and long double cannot be used in the same "
22053 "expression");
22055 if (!TARGET_IEEEQUAD
22056 && ((mode1 == KFmode && mode2 == TFmode)
22057 || (mode1 == TFmode && mode2 == KFmode)))
22058 return N_("__float128 and long double cannot be used in the same "
22059 "expression");
22062 return NULL;
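/* For example, given "__float128 a; __ibm128 b;", the expression "a + b"
   is rejected with the first message above unless -mfloat128-convert is
   in effect.  */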
22066 /* Expand floating point conversion to/from __float128 and __ibm128. */
22068 void
22069 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22071 machine_mode dest_mode = GET_MODE (dest);
22072 machine_mode src_mode = GET_MODE (src);
22073 convert_optab cvt = unknown_optab;
22074 bool do_move = false;
22075 rtx libfunc = NULL_RTX;
22076 rtx dest2;
22077 typedef rtx (*rtx_2func_t) (rtx, rtx);
22078 rtx_2func_t hw_convert = (rtx_2func_t)0;
22079 size_t kf_or_tf;
22081 struct hw_conv_t {
22082 rtx_2func_t from_df;
22083 rtx_2func_t from_sf;
22084 rtx_2func_t from_si_sign;
22085 rtx_2func_t from_si_uns;
22086 rtx_2func_t from_di_sign;
22087 rtx_2func_t from_di_uns;
22088 rtx_2func_t to_df;
22089 rtx_2func_t to_sf;
22090 rtx_2func_t to_si_sign;
22091 rtx_2func_t to_si_uns;
22092 rtx_2func_t to_di_sign;
22093 rtx_2func_t to_di_uns;
22094 } hw_conversions[2] = {
22095 /* conversions to/from KFmode */
22097 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22098 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22099 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22100 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22101 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22102 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22103 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22104 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22105 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22106 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22107 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22108 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22111 /* conversions to/from TFmode */
22113 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22114 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22115 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22116 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22117 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22118 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22119 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22120 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22121 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22122 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22123 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22124 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22128 if (dest_mode == src_mode)
22129 gcc_unreachable ();
22131 /* Eliminate memory operations. */
22132 if (MEM_P (src))
22133 src = force_reg (src_mode, src);
22135 if (MEM_P (dest))
22137 rtx tmp = gen_reg_rtx (dest_mode);
22138 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22139 rs6000_emit_move (dest, tmp, dest_mode);
22140 return;
22143 /* Convert to IEEE 128-bit floating point. */
22144 if (FLOAT128_IEEE_P (dest_mode))
22146 if (dest_mode == KFmode)
22147 kf_or_tf = 0;
22148 else if (dest_mode == TFmode)
22149 kf_or_tf = 1;
22150 else
22151 gcc_unreachable ();
22153 switch (src_mode)
22155 case E_DFmode:
22156 cvt = sext_optab;
22157 hw_convert = hw_conversions[kf_or_tf].from_df;
22158 break;
22160 case E_SFmode:
22161 cvt = sext_optab;
22162 hw_convert = hw_conversions[kf_or_tf].from_sf;
22163 break;
22165 case E_KFmode:
22166 case E_IFmode:
22167 case E_TFmode:
22168 if (FLOAT128_IBM_P (src_mode))
22169 cvt = sext_optab;
22170 else
22171 do_move = true;
22172 break;
22174 case E_SImode:
22175 if (unsigned_p)
22177 cvt = ufloat_optab;
22178 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22180 else
22182 cvt = sfloat_optab;
22183 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22185 break;
22187 case E_DImode:
22188 if (unsigned_p)
22190 cvt = ufloat_optab;
22191 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22193 else
22195 cvt = sfloat_optab;
22196 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22198 break;
22200 default:
22201 gcc_unreachable ();
22205 /* Convert from IEEE 128-bit floating point. */
22206 else if (FLOAT128_IEEE_P (src_mode))
22208 if (src_mode == KFmode)
22209 kf_or_tf = 0;
22210 else if (src_mode == TFmode)
22211 kf_or_tf = 1;
22212 else
22213 gcc_unreachable ();
22215 switch (dest_mode)
22217 case E_DFmode:
22218 cvt = trunc_optab;
22219 hw_convert = hw_conversions[kf_or_tf].to_df;
22220 break;
22222 case E_SFmode:
22223 cvt = trunc_optab;
22224 hw_convert = hw_conversions[kf_or_tf].to_sf;
22225 break;
22227 case E_KFmode:
22228 case E_IFmode:
22229 case E_TFmode:
22230 if (FLOAT128_IBM_P (dest_mode))
22231 cvt = trunc_optab;
22232 else
22233 do_move = true;
22234 break;
22236 case E_SImode:
22237 if (unsigned_p)
22239 cvt = ufix_optab;
22240 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22242 else
22244 cvt = sfix_optab;
22245 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22247 break;
22249 case E_DImode:
22250 if (unsigned_p)
22252 cvt = ufix_optab;
22253 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22255 else
22257 cvt = sfix_optab;
22258 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22260 break;
22262 default:
22263 gcc_unreachable ();
22267 /* Both IBM format. */
22268 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22269 do_move = true;
22271 else
22272 gcc_unreachable ();
22274 /* Handle conversion between TFmode/KFmode. */
22275 if (do_move)
22276 emit_move_insn (dest, gen_lowpart (dest_mode, src));
22278 /* Handle conversion if we have hardware support. */
22279 else if (TARGET_FLOAT128_HW && hw_convert)
22280 emit_insn ((hw_convert) (dest, src));
22282 /* Call an external function to do the conversion. */
22283 else if (cvt != unknown_optab)
22285 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22286 gcc_assert (libfunc != NULL_RTX);
22288 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22289 src, src_mode);
22291 gcc_assert (dest2 != NULL_RTX);
22292 if (!rtx_equal_p (dest, dest2))
22293 emit_move_insn (dest, dest2);
22296 else
22297 gcc_unreachable ();
22299 return;
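/* Illustrative flow: extending DFmode to KFmode uses gen_extenddfkf2_hw
   from the table above when TARGET_FLOAT128_HW; otherwise it goes through
   the sext_optab libfunc, conventionally __extenddfkf2 in libgcc.  */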
22303 /* Emit the RTL for an sISEL pattern. */
22305 void
22306 rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
22308 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
22311 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22312 can be used as that dest register. Return the dest register. */
22314 rtx
22315 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22317 if (op2 == const0_rtx)
22318 return op1;
22320 if (GET_CODE (scratch) == SCRATCH)
22321 scratch = gen_reg_rtx (mode);
22323 if (logical_operand (op2, mode))
22324 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22325 else
22326 emit_insn (gen_rtx_SET (scratch,
22327 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22329 return scratch;
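/* For example, comparing OP1 against the immediate 0x5555 emits
   "xori scratch,op1,0x5555" (the constant satisfies logical_operand),
   and SCRATCH is zero exactly when OP1 == OP2; a non-logical OP2 is
   handled with a subtract instead.  */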
22332 void
22333 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22335 rtx condition_rtx;
22336 machine_mode op_mode;
22337 enum rtx_code cond_code;
22338 rtx result = operands[0];
22340 condition_rtx = rs6000_generate_compare (operands[1], mode);
22341 cond_code = GET_CODE (condition_rtx);
22343 if (cond_code == NE
22344 || cond_code == GE || cond_code == LE
22345 || cond_code == GEU || cond_code == LEU
22346 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22348 rtx not_result = gen_reg_rtx (CCEQmode);
22349 rtx not_op, rev_cond_rtx;
22350 machine_mode cc_mode;
22352 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22354 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22355 SImode, XEXP (condition_rtx, 0), const0_rtx);
22356 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22357 emit_insn (gen_rtx_SET (not_result, not_op));
22358 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22361 op_mode = GET_MODE (XEXP (operands[1], 0));
22362 if (op_mode == VOIDmode)
22363 op_mode = GET_MODE (XEXP (operands[1], 1));
22365 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22367 PUT_MODE (condition_rtx, DImode);
22368 convert_move (result, condition_rtx, 0);
22370 else
22372 PUT_MODE (condition_rtx, SImode);
22373 emit_insn (gen_rtx_SET (result, condition_rtx));
22377 /* Emit a conditional branch: OPERANDS[0] is the comparison, and OPERANDS[3] is the label to branch to. */
22379 void
22380 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22382 rtx condition_rtx, loc_ref;
22384 condition_rtx = rs6000_generate_compare (operands[0], mode);
22385 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22386 emit_jump_insn (gen_rtx_SET (pc_rtx,
22387 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22388 loc_ref, pc_rtx)));
22391 /* Return the string to output a conditional branch to LABEL, which is
22392 the operand template of the label, or NULL if the branch is really a
22393 conditional return.
22395 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22396 condition code register and its mode specifies what kind of
22397 comparison we made.
22399 REVERSED is nonzero if we should reverse the sense of the comparison.
22401 INSN is the insn. */
22403 char *
22404 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22406 static char string[64];
22407 enum rtx_code code = GET_CODE (op);
22408 rtx cc_reg = XEXP (op, 0);
22409 machine_mode mode = GET_MODE (cc_reg);
22410 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22411 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22412 int really_reversed = reversed ^ need_longbranch;
22413 char *s = string;
22414 const char *ccode;
22415 const char *pred;
22416 rtx note;
22418 validate_condition_mode (code, mode);
22420 /* Work out which way this really branches. We could use
22421 reverse_condition_maybe_unordered here always but this
22422 makes the resulting assembler clearer. */
22423 if (really_reversed)
22425 /* Reversal of FP compares takes care -- an ordered compare
22426 becomes an unordered compare and vice versa. */
22427 if (mode == CCFPmode)
22428 code = reverse_condition_maybe_unordered (code);
22429 else
22430 code = reverse_condition (code);
22433 switch (code)
22435 /* Not all of these are actually distinct opcodes, but
22436 we distinguish them for clarity of the resulting assembler. */
22437 case NE: case LTGT:
22438 ccode = "ne"; break;
22439 case EQ: case UNEQ:
22440 ccode = "eq"; break;
22441 case GE: case GEU:
22442 ccode = "ge"; break;
22443 case GT: case GTU: case UNGT:
22444 ccode = "gt"; break;
22445 case LE: case LEU:
22446 ccode = "le"; break;
22447 case LT: case LTU: case UNLT:
22448 ccode = "lt"; break;
22449 case UNORDERED: ccode = "un"; break;
22450 case ORDERED: ccode = "nu"; break;
22451 case UNGE: ccode = "nl"; break;
22452 case UNLE: ccode = "ng"; break;
22453 default:
22454 gcc_unreachable ();
22457 /* Maybe we have a guess as to how likely the branch is. */
22458 pred = "";
22459 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22460 if (note != NULL_RTX)
22462 /* PROB is the difference from 50%. */
22463 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22464 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22466 /* Only hint for highly probable/improbable branches on newer cpus when
22467 we have real profile data, as static prediction overrides processor
22468 dynamic prediction. For older cpus we may as well always hint, but
22469 assume not taken for branches that are very close to 50% as a
22470 mispredicted taken branch is more expensive than a
22471 mispredicted not-taken branch. */
22472 if (rs6000_always_hint
22473 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22474 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22475 && br_prob_note_reliable_p (note)))
22477 if (abs (prob) > REG_BR_PROB_BASE / 20
22478 && ((prob > 0) ^ need_longbranch))
22479 pred = "+";
22480 else
22481 pred = "-";
22485 if (label == NULL)
22486 s += sprintf (s, "b%slr%s ", ccode, pred);
22487 else
22488 s += sprintf (s, "b%s%s ", ccode, pred);
22490 /* We need to escape any '%' characters in the reg_names string.
22491 Assume they'd only be the first character.... */
22492 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22493 *s++ = '%';
22494 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22496 if (label != NULL)
22498 /* If the branch distance was too far, we may have to use an
22499 unconditional branch to go the distance. */
22500 if (need_longbranch)
22501 s += sprintf (s, ",$+8\n\tb %s", label);
22502 else
22503 s += sprintf (s, ",%s", label);
22506 return string;
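/* Typical results (register and label names illustrative): "beq 0,$L24"
   for a short branch, "beq- 0,$L24" when a not-taken hint is added, and
   "bne 0,$+8\n\tb $L24" when the target is out of conditional-branch
   range.  */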
22509 /* Return insn for VSX or Altivec comparisons. */
22511 static rtx
22512 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22514 rtx mask;
22515 machine_mode mode = GET_MODE (op0);
22517 switch (code)
22519 default:
22520 break;
22522 case GE:
22523 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22524 return NULL_RTX;
22525 /* FALLTHRU */
22527 case EQ:
22528 case GT:
22529 case GTU:
22530 case ORDERED:
22531 case UNORDERED:
22532 case UNEQ:
22533 case LTGT:
22534 mask = gen_reg_rtx (mode);
22535 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22536 return mask;
22539 return NULL_RTX;
22542 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22543 DMODE is expected destination mode. This is a recursive function. */
22545 static rtx
22546 rs6000_emit_vector_compare (enum rtx_code rcode,
22547 rtx op0, rtx op1,
22548 machine_mode dmode)
22550 rtx mask;
22551 bool swap_operands = false;
22552 bool try_again = false;
22554 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22555 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22557 /* See if the comparison works as is. */
22558 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22559 if (mask)
22560 return mask;
22562 switch (rcode)
22564 case LT:
22565 rcode = GT;
22566 swap_operands = true;
22567 try_again = true;
22568 break;
22569 case LTU:
22570 rcode = GTU;
22571 swap_operands = true;
22572 try_again = true;
22573 break;
22574 case NE:
22575 case UNLE:
22576 case UNLT:
22577 case UNGE:
22578 case UNGT:
22579 /* Invert condition and try again.
22580 e.g., A != B becomes ~(A==B). */
22582 enum rtx_code rev_code;
22583 enum insn_code nor_code;
22584 rtx mask2;
22586 rev_code = reverse_condition_maybe_unordered (rcode);
22587 if (rev_code == UNKNOWN)
22588 return NULL_RTX;
22590 nor_code = optab_handler (one_cmpl_optab, dmode);
22591 if (nor_code == CODE_FOR_nothing)
22592 return NULL_RTX;
22594 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22595 if (!mask2)
22596 return NULL_RTX;
22598 mask = gen_reg_rtx (dmode);
22599 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22600 return mask;
22602 break;
22603 case GE:
22604 case GEU:
22605 case LE:
22606 case LEU:
22607 /* Try GT/GTU/LT/LTU OR EQ */
22609 rtx c_rtx, eq_rtx;
22610 enum insn_code ior_code;
22611 enum rtx_code new_code;
22613 switch (rcode)
22615 case GE:
22616 new_code = GT;
22617 break;
22619 case GEU:
22620 new_code = GTU;
22621 break;
22623 case LE:
22624 new_code = LT;
22625 break;
22627 case LEU:
22628 new_code = LTU;
22629 break;
22631 default:
22632 gcc_unreachable ();
22635 ior_code = optab_handler (ior_optab, dmode);
22636 if (ior_code == CODE_FOR_nothing)
22637 return NULL_RTX;
22639 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22640 if (!c_rtx)
22641 return NULL_RTX;
22643 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22644 if (!eq_rtx)
22645 return NULL_RTX;
22647 mask = gen_reg_rtx (dmode);
22648 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22649 return mask;
22651 break;
22652 default:
22653 return NULL_RTX;
22656 if (try_again)
22658 if (swap_operands)
22659 std::swap (op0, op1);
22661 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22662 if (mask)
22663 return mask;
22666 /* You only get two chances. */
22667 return NULL_RTX;
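/* Example of the recursion: V4SI "a <= b" has no single instruction, so
   it is built as (a < b) IOR (a == b), and the LT leg is itself handled
   by swapping the operands of a GT compare.  */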
22670 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22671 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22672 operands for the relation operation COND. */
22674 int
22675 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22676 rtx cond, rtx cc_op0, rtx cc_op1)
22678 machine_mode dest_mode = GET_MODE (dest);
22679 machine_mode mask_mode = GET_MODE (cc_op0);
22680 enum rtx_code rcode = GET_CODE (cond);
22681 machine_mode cc_mode = CCmode;
22682 rtx mask;
22683 rtx cond2;
22684 bool invert_move = false;
22686 if (VECTOR_UNIT_NONE_P (dest_mode))
22687 return 0;
22689 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22690 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22692 switch (rcode)
22694 /* Swap operands if we can, and fall back to doing the operation as
22695 specified, and doing a NOR to invert the test. */
22696 case NE:
22697 case UNLE:
22698 case UNLT:
22699 case UNGE:
22700 case UNGT:
22701 /* Invert condition and try again.
22702 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22703 invert_move = true;
22704 rcode = reverse_condition_maybe_unordered (rcode);
22705 if (rcode == UNKNOWN)
22706 return 0;
22707 break;
22709 case GE:
22710 case LE:
22711 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22713 /* Invert condition to avoid compound test. */
22714 invert_move = true;
22715 rcode = reverse_condition (rcode);
22717 break;
22719 case GTU:
22720 case GEU:
22721 case LTU:
22722 case LEU:
22723 /* Mark unsigned tests with CCUNSmode. */
22724 cc_mode = CCUNSmode;
22726 /* Invert condition to avoid compound test if necessary. */
22727 if (rcode == GEU || rcode == LEU)
22729 invert_move = true;
22730 rcode = reverse_condition (rcode);
22732 break;
22734 default:
22735 break;
22738 /* Get the vector mask for the given relational operations. */
22739 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22741 if (!mask)
22742 return 0;
22744 if (invert_move)
22745 std::swap (op_true, op_false);
22747 /* Optimize vec1 == vec2, to know the mask generates -1/0. */
22748 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22749 && (GET_CODE (op_true) == CONST_VECTOR
22750 || GET_CODE (op_false) == CONST_VECTOR))
22752 rtx constant_0 = CONST0_RTX (dest_mode);
22753 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22755 if (op_true == constant_m1 && op_false == constant_0)
22757 emit_move_insn (dest, mask);
22758 return 1;
22761 else if (op_true == constant_0 && op_false == constant_m1)
22763 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22764 return 1;
22767 /* If we can't use the vector comparison directly, perhaps we can use
22768 the mask for the true or false fields, instead of loading up a
22769 constant. */
22770 if (op_true == constant_m1)
22771 op_true = mask;
22773 if (op_false == constant_0)
22774 op_false = mask;
22777 if (!REG_P (op_true) && !SUBREG_P (op_true))
22778 op_true = force_reg (dest_mode, op_true);
22780 if (!REG_P (op_false) && !SUBREG_P (op_false))
22781 op_false = force_reg (dest_mode, op_false);
22783 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22784 CONST0_RTX (dest_mode));
22785 emit_insn (gen_rtx_SET (dest,
22786 gen_rtx_IF_THEN_ELSE (dest_mode,
22787 cond2,
22788 op_true,
22789 op_false)));
22790 return 1;
22793 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
22794 for SF/DF scalars. Move TRUE_COND to DEST if OP of the operands of the last
22795 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22796 hardware has no such operation. */
22798 static int
22799 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22801 enum rtx_code code = GET_CODE (op);
22802 rtx op0 = XEXP (op, 0);
22803 rtx op1 = XEXP (op, 1);
22804 machine_mode compare_mode = GET_MODE (op0);
22805 machine_mode result_mode = GET_MODE (dest);
22806 bool max_p = false;
22808 if (result_mode != compare_mode)
22809 return 0;
22811 if (code == GE || code == GT)
22812 max_p = true;
22813 else if (code == LE || code == LT)
22814 max_p = false;
22815 else
22816 return 0;
22818 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22819 ;
22821 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22822 max_p = !max_p;
22824 else
22825 return 0;
22827 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22828 return 1;
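/* E.g. "a >= b ? a : b" on DFmode operands becomes a single xsmaxcdp,
   while "a >= b ? b : a" flips MAX_P and becomes xsmincdp.  */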
22831 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22832 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP of the
22833 operands of the last comparison is nonzero/true, FALSE_COND if it is
22834 zero/false. Return 0 if the hardware has no such operation. */
22836 static int
22837 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22839 enum rtx_code code = GET_CODE (op);
22840 rtx op0 = XEXP (op, 0);
22841 rtx op1 = XEXP (op, 1);
22842 machine_mode result_mode = GET_MODE (dest);
22843 rtx compare_rtx;
22844 rtx cmove_rtx;
22845 rtx clobber_rtx;
22847 if (!can_create_pseudo_p ())
22848 return 0;
22850 switch (code)
22852 case EQ:
22853 case GE:
22854 case GT:
22855 break;
22857 case NE:
22858 case LT:
22859 case LE:
22860 code = swap_condition (code);
22861 std::swap (op0, op1);
22862 break;
22864 default:
22865 return 0;
22868 /* Generate: [(parallel [(set (dest)
22869 (if_then_else (op (cmp1) (cmp2))
22870 (true)
22871 (false)))
22872 (clobber (scratch))])]. */
22874 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22875 cmove_rtx = gen_rtx_SET (dest,
22876 gen_rtx_IF_THEN_ELSE (result_mode,
22877 compare_rtx,
22878 true_cond,
22879 false_cond));
22881 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22882 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22883 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22885 return 1;
22888 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
22889 operands of the last comparison is nonzero/true, FALSE_COND if it
22890 is zero/false. Return 0 if the hardware has no such operation. */
22892 int
22893 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22895 enum rtx_code code = GET_CODE (op);
22896 rtx op0 = XEXP (op, 0);
22897 rtx op1 = XEXP (op, 1);
22898 machine_mode compare_mode = GET_MODE (op0);
22899 machine_mode result_mode = GET_MODE (dest);
22900 rtx temp;
22901 bool is_against_zero;
22903 /* These modes should always match. */
22904 if (GET_MODE (op1) != compare_mode
22905 /* In the isel case however, we can use a compare immediate, so
22906 op1 may be a small constant. */
22907 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22908 return 0;
22909 if (GET_MODE (true_cond) != result_mode)
22910 return 0;
22911 if (GET_MODE (false_cond) != result_mode)
22912 return 0;
22914 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22915 if (TARGET_P9_MINMAX
22916 && (compare_mode == SFmode || compare_mode == DFmode)
22917 && (result_mode == SFmode || result_mode == DFmode))
22919 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22920 return 1;
22922 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22923 return 1;
22926 /* Don't allow using floating point comparisons for integer results for
22927 now. */
22928 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22929 return 0;
22931 /* First, work out if the hardware can do this at all, or
22932 if it's too slow.... */
22933 if (!FLOAT_MODE_P (compare_mode))
22935 if (TARGET_ISEL)
22936 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22937 return 0;
22940 is_against_zero = op1 == CONST0_RTX (compare_mode);
22942 /* A floating-point subtract might overflow, underflow, or produce
22943 an inexact result, thus changing the floating-point flags, so it
22944 can't be generated if we care about that. It's safe if one side
22945 of the construct is zero, since then no subtract will be
22946 generated. */
22947 if (SCALAR_FLOAT_MODE_P (compare_mode)
22948 && flag_trapping_math && ! is_against_zero)
22949 return 0;
22951 /* Eliminate half of the comparisons by switching operands, this
22952 makes the remaining code simpler. */
22953 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22954 || code == LTGT || code == LT || code == UNLE)
22956 code = reverse_condition_maybe_unordered (code);
22957 temp = true_cond;
22958 true_cond = false_cond;
22959 false_cond = temp;
22962 /* UNEQ and LTGT take four instructions for a comparison with zero,
22963 it'll probably be faster to use a branch here too. */
22964 if (code == UNEQ && HONOR_NANS (compare_mode))
22965 return 0;
22967 /* We're going to try to implement comparisons by performing
22968 a subtract, then comparing against zero. Unfortunately,
22969 Inf - Inf is NaN which is not zero, and so if we don't
22970 know that the operand is finite and the comparison
22971 would treat EQ differently from UNORDERED, we can't do it. */
22972 if (HONOR_INFINITIES (compare_mode)
22973 && code != GT && code != UNGE
22974 && (GET_CODE (op1) != CONST_DOUBLE
22975 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22976 /* Constructs of the form (a OP b ? a : b) are safe. */
22977 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22978 || (! rtx_equal_p (op0, true_cond)
22979 && ! rtx_equal_p (op1, true_cond))))
22980 return 0;
22982 /* At this point we know we can use fsel. */
22984 /* Reduce the comparison to a comparison against zero. */
22985 if (! is_against_zero)
22987 temp = gen_reg_rtx (compare_mode);
22988 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
22989 op0 = temp;
22990 op1 = CONST0_RTX (compare_mode);
22993 /* If we don't care about NaNs we can reduce some of the comparisons
22994 down to faster ones. */
22995 if (! HONOR_NANS (compare_mode))
22996 switch (code)
22998 case GT:
22999 code = LE;
23000 temp = true_cond;
23001 true_cond = false_cond;
23002 false_cond = temp;
23003 break;
23004 case UNGE:
23005 code = GE;
23006 break;
23007 case UNEQ:
23008 code = EQ;
23009 break;
23010 default:
23011 break;
23014 /* Now, reduce everything down to a GE. */
23015 switch (code)
23017 case GE:
23018 break;
23020 case LE:
23021 temp = gen_reg_rtx (compare_mode);
23022 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23023 op0 = temp;
23024 break;
23026 case ORDERED:
23027 temp = gen_reg_rtx (compare_mode);
23028 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23029 op0 = temp;
23030 break;
23032 case EQ:
23033 temp = gen_reg_rtx (compare_mode);
23034 emit_insn (gen_rtx_SET (temp,
23035 gen_rtx_NEG (compare_mode,
23036 gen_rtx_ABS (compare_mode, op0))));
23037 op0 = temp;
23038 break;
23040 case UNGE:
23041 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23042 temp = gen_reg_rtx (result_mode);
23043 emit_insn (gen_rtx_SET (temp,
23044 gen_rtx_IF_THEN_ELSE (result_mode,
23045 gen_rtx_GE (VOIDmode,
23046 op0, op1),
23047 true_cond, false_cond)));
23048 false_cond = true_cond;
23049 true_cond = temp;
23051 temp = gen_reg_rtx (compare_mode);
23052 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23053 op0 = temp;
23054 break;
23056 case GT:
23057 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23058 temp = gen_reg_rtx (result_mode);
23059 emit_insn (gen_rtx_SET (temp,
23060 gen_rtx_IF_THEN_ELSE (result_mode,
23061 gen_rtx_GE (VOIDmode,
23062 op0, op1),
23063 true_cond, false_cond)));
23064 true_cond = false_cond;
23065 false_cond = temp;
23067 temp = gen_reg_rtx (compare_mode);
23068 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23069 op0 = temp;
23070 break;
23072 default:
23073 gcc_unreachable ();
23076 emit_insn (gen_rtx_SET (dest,
23077 gen_rtx_IF_THEN_ELSE (result_mode,
23078 gen_rtx_GE (VOIDmode,
23079 op0, op1),
23080 true_cond, false_cond)));
23081 return 1;
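/* Note: every path above funnels into the same final form,
   "dest = (op0 >= 0 ? true_cond : false_cond)", which is exactly the
   operation the fsel instruction implements.  */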
23084 /* Same as above, but for ints (isel). */
23086 static int
23087 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23089 rtx condition_rtx, cr;
23090 machine_mode mode = GET_MODE (dest);
23091 enum rtx_code cond_code;
23092 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23093 bool signedp;
23095 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23096 return 0;
23098 /* We still have to do the compare, because isel doesn't do a
23099 compare, it just looks at the CRx bits set by a previous compare
23100 instruction. */
23101 condition_rtx = rs6000_generate_compare (op, mode);
23102 cond_code = GET_CODE (condition_rtx);
23103 cr = XEXP (condition_rtx, 0);
23104 signedp = GET_MODE (cr) == CCmode;
23106 isel_func = (mode == SImode
23107 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23108 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23110 switch (cond_code)
23112 case LT: case GT: case LTU: case GTU: case EQ:
23113 /* isel handles these directly. */
23114 break;
23116 default:
23117 /* We need to swap the sense of the comparison. */
23119 std::swap (false_cond, true_cond);
23120 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23122 break;
23125 false_cond = force_reg (mode, false_cond);
23126 if (true_cond != const0_rtx)
23127 true_cond = force_reg (mode, true_cond);
23129 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23131 return 1;
23134 const char *
23135 output_isel (rtx *operands)
23137 enum rtx_code code;
23139 code = GET_CODE (operands[1]);
23141 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
23143 gcc_assert (GET_CODE (operands[2]) == REG
23144 && GET_CODE (operands[3]) == REG);
23145 PUT_CODE (operands[1], reverse_condition (code));
23146 return "isel %0,%3,%2,%j1";
23149 return "isel %0,%2,%3,%j1";
23152 void
23153 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23155 machine_mode mode = GET_MODE (op0);
23156 enum rtx_code c;
23157 rtx target;
23159 /* VSX/altivec have direct min/max insns. */
23160 if ((code == SMAX || code == SMIN)
23161 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23162 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23164 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23165 return;
23168 if (code == SMAX || code == SMIN)
23169 c = GE;
23170 else
23171 c = GEU;
23173 if (code == SMAX || code == UMAX)
23174 target = emit_conditional_move (dest, c, op0, op1, mode,
23175 op0, op1, mode, 0);
23176 else
23177 target = emit_conditional_move (dest, c, op0, op1, mode,
23178 op1, op0, mode, 0);
23179 gcc_assert (target);
23180 if (target != dest)
23181 emit_move_insn (dest, target);
23184 /* Split a signbit operation on 64-bit machines with direct move. Also allow
23185 for the value to come from memory or if it is already loaded into a GPR. */
23187 void
23188 rs6000_split_signbit (rtx dest, rtx src)
23190 machine_mode d_mode = GET_MODE (dest);
23191 machine_mode s_mode = GET_MODE (src);
23192 rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
23193 rtx shift_reg = dest_di;
23195 gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);
23197 if (MEM_P (src))
23199 rtx mem = (WORDS_BIG_ENDIAN
23200 ? adjust_address (src, DImode, 0)
23201 : adjust_address (src, DImode, 8));
23202 emit_insn (gen_rtx_SET (dest_di, mem));
23205 else
23207 unsigned int r = reg_or_subregno (src);
23209 if (INT_REGNO_P (r))
23210 shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));
23212 else
23214 /* Generate the special mfvsrd instruction to get it in a GPR. */
23215 gcc_assert (VSX_REGNO_P (r));
23216 if (s_mode == KFmode)
23217 emit_insn (gen_signbitkf2_dm2 (dest_di, src));
23218 else
23219 emit_insn (gen_signbittf2_dm2 (dest_di, src));
23223 emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
23224 return;
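/* The sign of an IEEE 128-bit value lives in the most significant bit
   of the high double-word, so the logical shift right by 63 above leaves
   just that bit in DEST.  */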
23227 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23228 COND is true. Mark the jump as unlikely to be taken. */
23230 static void
23231 emit_unlikely_jump (rtx cond, rtx label)
23233 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23234 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23235 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23238 /* A subroutine of the atomic operation splitters. Emit a load-locked
23239 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23240 the zero_extend operation. */
23242 static void
23243 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23245 rtx (*fn) (rtx, rtx) = NULL;
23247 switch (mode)
23249 case E_QImode:
23250 fn = gen_load_lockedqi;
23251 break;
23252 case E_HImode:
23253 fn = gen_load_lockedhi;
23254 break;
23255 case E_SImode:
23256 if (GET_MODE (mem) == QImode)
23257 fn = gen_load_lockedqi_si;
23258 else if (GET_MODE (mem) == HImode)
23259 fn = gen_load_lockedhi_si;
23260 else
23261 fn = gen_load_lockedsi;
23262 break;
23263 case E_DImode:
23264 fn = gen_load_lockeddi;
23265 break;
23266 case E_TImode:
23267 fn = gen_load_lockedti;
23268 break;
23269 default:
23270 gcc_unreachable ();
23272 emit_insn (fn (reg, mem));
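/* These generator functions ultimately emit the larx family of
   load-and-reserve instructions (lbarx/lharx/lwarx/ldarx/lqarx).  */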
23275 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23276 instruction in MODE. */
23278 static void
23279 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23281 rtx (*fn) (rtx, rtx, rtx) = NULL;
23283 switch (mode)
23285 case E_QImode:
23286 fn = gen_store_conditionalqi;
23287 break;
23288 case E_HImode:
23289 fn = gen_store_conditionalhi;
23290 break;
23291 case E_SImode:
23292 fn = gen_store_conditionalsi;
23293 break;
23294 case E_DImode:
23295 fn = gen_store_conditionaldi;
23296 break;
23297 case E_TImode:
23298 fn = gen_store_conditionalti;
23299 break;
23300 default:
23301 gcc_unreachable ();
23304 /* Emit sync before stwcx. to address PPC405 Erratum. */
23305 if (PPC405_ERRATUM77)
23306 emit_insn (gen_hwsync ());
23308 emit_insn (fn (res, mem, val));
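/* The matching store-conditional instructions are stbcx., sthcx.,
   stwcx., stdcx. and stqcx.; each sets the EQ bit of CR0 when the
   reservation still held and the store succeeded.  */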
23311 /* Expand barriers before and after a load_locked/store_cond sequence. */
23313 static rtx
23314 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23316 rtx addr = XEXP (mem, 0);
23318 if (!legitimate_indirect_address_p (addr, reload_completed)
23319 && !legitimate_indexed_address_p (addr, reload_completed))
23321 addr = force_reg (Pmode, addr);
23322 mem = replace_equiv_address_nv (mem, addr);
23325 switch (model)
23327 case MEMMODEL_RELAXED:
23328 case MEMMODEL_CONSUME:
23329 case MEMMODEL_ACQUIRE:
23330 break;
23331 case MEMMODEL_RELEASE:
23332 case MEMMODEL_ACQ_REL:
23333 emit_insn (gen_lwsync ());
23334 break;
23335 case MEMMODEL_SEQ_CST:
23336 emit_insn (gen_hwsync ());
23337 break;
23338 default:
23339 gcc_unreachable ();
23341 return mem;
23344 static void
23345 rs6000_post_atomic_barrier (enum memmodel model)
23347 switch (model)
23349 case MEMMODEL_RELAXED:
23350 case MEMMODEL_CONSUME:
23351 case MEMMODEL_RELEASE:
23352 break;
23353 case MEMMODEL_ACQUIRE:
23354 case MEMMODEL_ACQ_REL:
23355 case MEMMODEL_SEQ_CST:
23356 emit_insn (gen_isync ());
23357 break;
23358 default:
23359 gcc_unreachable ();
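/* Rough summary of the two barrier helpers, assuming the usual PowerPC
   mapping: release-or-stronger orderings get an lwsync (hwsync for
   seq_cst) before the larx/stcx. loop, and acquire-or-stronger orderings
   get an isync after it.  */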
23363 /* A subroutine of the various atomic expanders. For sub-word operations,
23364 we must adjust things to operate on SImode. Given the original MEM,
23365 return a new aligned memory. Also build and return the quantities by
23366 which to shift and mask. */
23368 static rtx
23369 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23371 rtx addr, align, shift, mask, mem;
23372 HOST_WIDE_INT shift_mask;
23373 machine_mode mode = GET_MODE (orig_mem);
23375 /* For smaller modes, we have to implement this via SImode. */
23376 shift_mask = (mode == QImode ? 0x18 : 0x10);
23378 addr = XEXP (orig_mem, 0);
23379 addr = force_reg (GET_MODE (addr), addr);
23381 /* Aligned memory containing subword. Generate a new memory. We
23382 do not want any of the existing MEM_ATTR data, as we're now
23383 accessing memory outside the original object. */
23384 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23385 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23386 mem = gen_rtx_MEM (SImode, align);
23387 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23388 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23389 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23391 /* Shift amount for subword relative to aligned word. */
23392 shift = gen_reg_rtx (SImode);
23393 addr = gen_lowpart (SImode, addr);
23394 rtx tmp = gen_reg_rtx (SImode);
23395 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23396 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23397 if (BYTES_BIG_ENDIAN)
23398 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23399 shift, 1, OPTAB_LIB_WIDEN);
23400 *pshift = shift;
23402 /* Mask for insertion. */
23403 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23404 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23405 *pmask = mask;
23407 return mem;
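/* Worked example for a QImode operand at address A (little-endian): MEM
   is the SImode word at A & -4, *PSHIFT is (A & 3) * 8, and *PMASK is
   0xff << *PSHIFT; big-endian XORs the shift with 24 so that byte 0 is
   the most significant.  */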
23410 /* A subroutine of the various atomic expanders. For sub-word operands,
23411 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23413 static rtx
23414 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23416 rtx x;
23418 x = gen_reg_rtx (SImode);
23419 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23420 gen_rtx_NOT (SImode, mask),
23421 oldval)));
23423 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23425 return x;
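/* I.e. the returned pseudo holds (OLDVAL & ~MASK) | NEWVAL; callers have
   already shifted NEWVAL into position, and it must have no bits set
   outside MASK.  */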
23428 /* A subroutine of the various atomic expanders. For sub-word operands,
23429 extract WIDE to NARROW via SHIFT. */
23431 static void
23432 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23434 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23435 wide, 1, OPTAB_LIB_WIDEN);
23436 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23439 /* Expand an atomic compare and swap operation. */
23441 void
23442 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23444 rtx boolval, retval, mem, oldval, newval, cond;
23445 rtx label1, label2, x, mask, shift;
23446 machine_mode mode, orig_mode;
23447 enum memmodel mod_s, mod_f;
23448 bool is_weak;
23450 boolval = operands[0];
23451 retval = operands[1];
23452 mem = operands[2];
23453 oldval = operands[3];
23454 newval = operands[4];
23455 is_weak = (INTVAL (operands[5]) != 0);
23456 mod_s = memmodel_base (INTVAL (operands[6]));
23457 mod_f = memmodel_base (INTVAL (operands[7]));
23458 orig_mode = mode = GET_MODE (mem);
23460 mask = shift = NULL_RTX;
23461 if (mode == QImode || mode == HImode)
23463 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23464 lwarx and shift/mask operations. With power8, we need to do the
23465 comparison in SImode, but the store is still done in QI/HImode. */
23466 oldval = convert_modes (SImode, mode, oldval, 1);
23468 if (!TARGET_SYNC_HI_QI)
23470 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23472 /* Shift and mask OLDVAL into position within the word. */
23473 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23474 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23476 /* Shift and mask NEWVAL into position within the word. */
23477 newval = convert_modes (SImode, mode, newval, 1);
23478 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23479 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23482 /* Prepare to adjust the return value. */
23483 retval = gen_reg_rtx (SImode);
23484 mode = SImode;
23486 else if (reg_overlap_mentioned_p (retval, oldval))
23487 oldval = copy_to_reg (oldval);
23489 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23490 oldval = copy_to_mode_reg (mode, oldval);
23492 if (reg_overlap_mentioned_p (retval, newval))
23493 newval = copy_to_reg (newval);
23495 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23497 label1 = NULL_RTX;
23498 if (!is_weak)
23500 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23501 emit_label (XEXP (label1, 0));
23503 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23505 emit_load_locked (mode, retval, mem);
23507 x = retval;
23508 if (mask)
23509 x = expand_simple_binop (SImode, AND, retval, mask,
23510 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23512 cond = gen_reg_rtx (CCmode);
23513 /* If we have TImode, synthesize a comparison. */
23514 if (mode != TImode)
23515 x = gen_rtx_COMPARE (CCmode, x, oldval);
23516 else
23518 rtx xor1_result = gen_reg_rtx (DImode);
23519 rtx xor2_result = gen_reg_rtx (DImode);
23520 rtx or_result = gen_reg_rtx (DImode);
23521 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23522 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23523 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23524 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23526 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23527 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23528 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23529 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23532 emit_insn (gen_rtx_SET (cond, x));
23534 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23535 emit_unlikely_jump (x, label2);
23537 x = newval;
23538 if (mask)
23539 x = rs6000_mask_atomic_subword (retval, newval, mask);
23541 emit_store_conditional (orig_mode, cond, mem, x);
23543 if (!is_weak)
23545 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23546 emit_unlikely_jump (x, label1);
23549 if (!is_mm_relaxed (mod_f))
23550 emit_label (XEXP (label2, 0));
23552 rs6000_post_atomic_barrier (mod_s);
23554 if (is_mm_relaxed (mod_f))
23555 emit_label (XEXP (label2, 0));
23557 if (shift)
23558 rs6000_finish_atomic_subword (operands[1], retval, shift);
23559 else if (mode != GET_MODE (operands[1]))
23560 convert_move (operands[1], retval, 1);
23562 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23563 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23564 emit_insn (gen_rtx_SET (boolval, x));
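/* Shape of the generated sequence (strong variant, word-sized case;
   labels and exact mnemonics illustrative):

       .L1:  lwarx  ret,0,mem
             cmpw   cr0,ret,oldval
             bne-   cr0,.L2
             stwcx. newval,0,mem
             bne-   cr0,.L1
       .L2:

   plus whatever barriers the memory model requires.  */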
23567 /* Expand an atomic exchange operation. */
23569 void
23570 rs6000_expand_atomic_exchange (rtx operands[])
23572 rtx retval, mem, val, cond;
23573 machine_mode mode;
23574 enum memmodel model;
23575 rtx label, x, mask, shift;
23577 retval = operands[0];
23578 mem = operands[1];
23579 val = operands[2];
23580 model = memmodel_base (INTVAL (operands[3]));
23581 mode = GET_MODE (mem);
23583 mask = shift = NULL_RTX;
23584 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23586 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23588 /* Shift and mask VAL into position within the word. */
23589 val = convert_modes (SImode, mode, val, 1);
23590 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23591 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23593 /* Prepare to adjust the return value. */
23594 retval = gen_reg_rtx (SImode);
23595 mode = SImode;
23598 mem = rs6000_pre_atomic_barrier (mem, model);
23600 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23601 emit_label (XEXP (label, 0));
23603 emit_load_locked (mode, retval, mem);
23605 x = val;
23606 if (mask)
23607 x = rs6000_mask_atomic_subword (retval, val, mask);
23609 cond = gen_reg_rtx (CCmode);
23610 emit_store_conditional (mode, cond, mem, x);
23612 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23613 emit_unlikely_jump (x, label);
23615 rs6000_post_atomic_barrier (model);
23617 if (shift)
23618 rs6000_finish_atomic_subword (operands[0], retval, shift);
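/* For reference, an illustrative sketch of the word-sized exchange loop
   built above (barriers omitted):

   .L1: lwarx  r9,0,r3     # r9 = old value, reservation taken
        stwcx. r4,0,r3     # try to store the new value
        bne-   .L1         # reservation lost: retry  */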
23621 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23622 to perform. MEM is the memory on which to operate. VAL is the second
23623 operand of the binary operator. BEFORE and AFTER are optional locations to
23624 return the value of MEM either before or after the operation. MODEL_RTX
23625 is a CONST_INT containing the memory model to use. */
23627 void
23628 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23629 rtx orig_before, rtx orig_after, rtx model_rtx)
23631 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23632 machine_mode mode = GET_MODE (mem);
23633 machine_mode store_mode = mode;
23634 rtx label, x, cond, mask, shift;
23635 rtx before = orig_before, after = orig_after;
23637 mask = shift = NULL_RTX;
23638 /* On power8, we want to use SImode for the operation. On previous systems,
23639 use the operation in a subword and shift/mask to get the proper byte or
23640 halfword. */
23641 if (mode == QImode || mode == HImode)
23643 if (TARGET_SYNC_HI_QI)
23645 val = convert_modes (SImode, mode, val, 1);
23647 /* Prepare to adjust the return value. */
23648 before = gen_reg_rtx (SImode);
23649 if (after)
23650 after = gen_reg_rtx (SImode);
23651 mode = SImode;
23653 else
23655 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23657 /* Shift and mask VAL into position within the word. */
23658 val = convert_modes (SImode, mode, val, 1);
23659 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23660 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23662 switch (code)
23664 case IOR:
23665 case XOR:
23666 /* We've already zero-extended VAL. That is sufficient to
23667 make certain that it does not affect other bits. */
23668 mask = NULL;
23669 break;
23671 case AND:
23672 /* If we make certain that all of the other bits in VAL are
23673 set, that will be sufficient to not affect other bits. */
23674 x = gen_rtx_NOT (SImode, mask);
23675 x = gen_rtx_IOR (SImode, x, val);
23676 emit_insn (gen_rtx_SET (val, x));
23677 mask = NULL;
23678 break;
23680 case NOT:
23681 case PLUS:
23682 case MINUS:
23683 /* These will all affect bits outside the field and need
23684 adjustment via MASK within the loop. */
23685 break;
23687 default:
23688 gcc_unreachable ();
23691 /* Prepare to adjust the return value. */
23692 before = gen_reg_rtx (SImode);
23693 if (after)
23694 after = gen_reg_rtx (SImode);
23695 store_mode = mode = SImode;
23699 mem = rs6000_pre_atomic_barrier (mem, model);
23701 label = gen_label_rtx ();
23702 emit_label (label);
23703 label = gen_rtx_LABEL_REF (VOIDmode, label);
23705 if (before == NULL_RTX)
23706 before = gen_reg_rtx (mode);
23708 emit_load_locked (mode, before, mem);
23710 if (code == NOT)
23712 x = expand_simple_binop (mode, AND, before, val,
23713 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23714 after = expand_simple_unop (mode, NOT, x, after, 1);
23716 else
23718 after = expand_simple_binop (mode, code, before, val,
23719 after, 1, OPTAB_LIB_WIDEN);
23722 x = after;
23723 if (mask)
23725 x = expand_simple_binop (SImode, AND, after, mask,
23726 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23727 x = rs6000_mask_atomic_subword (before, x, mask);
23729 else if (store_mode != mode)
23730 x = convert_modes (store_mode, mode, x, 1);
23732 cond = gen_reg_rtx (CCmode);
23733 emit_store_conditional (store_mode, cond, mem, x);
23735 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23736 emit_unlikely_jump (x, label);
23738 rs6000_post_atomic_barrier (model);
23740 if (shift)
23742 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23743 then do the calculations in an SImode register. */
23744 if (orig_before)
23745 rs6000_finish_atomic_subword (orig_before, before, shift);
23746 if (orig_after)
23747 rs6000_finish_atomic_subword (orig_after, after, shift);
23749 else if (store_mode != mode)
23751 /* QImode/HImode on machines with lbarx/lharx where we do the native
23752 operation and then do the calculations in an SImode register. */
23753 if (orig_before)
23754 convert_move (orig_before, before, 1);
23755 if (orig_after)
23756 convert_move (orig_after, after, 1);
23758 else if (orig_after && after != orig_after)
23759 emit_move_insn (orig_after, after);
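/* For reference, an illustrative sketch of a word-sized fetch-and-add as
   built above (barriers omitted; BEFORE ends up in r9, AFTER in r10):

   .L1: lwarx  r9,0,r3
        add    r10,r9,r4
        stwcx. r10,0,r3
        bne-   .L1

   Note for the subword AND case: ORing VAL with ~MASK sets every bit
   outside the field, so the full-word AND leaves the rest of the
   containing word unchanged.  */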
23762 /* Emit instructions to move SRC to DST. Called by splitters for
23763 multi-register moves. It will emit at most one instruction for
23764 each register that is accessed; that is, it won't emit li/lis pairs
23765 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23766 register. */
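/* For example (illustrative): on a 64-bit target a TImode move from
   r3:r4 to r5:r6 splits into two DImode moves; an overlapping move such
   as r3:r4 to r4:r5 is done highest register first, so the source is
   not clobbered before it is read.  */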
23768 void
23769 rs6000_split_multireg_move (rtx dst, rtx src)
23771 /* The register number of the first register being moved. */
23772 int reg;
23773 /* The mode that is to be moved. */
23774 machine_mode mode;
23775 /* The mode that the move is being done in, and its size. */
23776 machine_mode reg_mode;
23777 int reg_mode_size;
23778 /* The number of registers that will be moved. */
23779 int nregs;
23781 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23782 mode = GET_MODE (dst);
23783 nregs = hard_regno_nregs[reg][mode];
23784 if (FP_REGNO_P (reg))
23785 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23786 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
23787 else if (ALTIVEC_REGNO_P (reg))
23788 reg_mode = V16QImode;
23789 else
23790 reg_mode = word_mode;
23791 reg_mode_size = GET_MODE_SIZE (reg_mode);
23793 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23795 /* TDmode residing in FP registers is special, since the ISA requires that
23796 the lower-numbered word of a register pair is always the most significant
23797 word, even in little-endian mode. This does not match the usual subreg
23798 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23799 the appropriate constituent registers "by hand" in little-endian mode.
23801 Note we do not need to check for destructive overlap here since TDmode
23802 can only reside in even/odd register pairs. */
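/* Concretely (a sketch): a TDmode value in the pair f10:f11 keeps its
   most significant word in f10 on both endiannesses, so on little-endian
   the subreg at byte offset 0 (the least significant word) lives in f11;
   hence the REGNO + nregs - 1 - i indexing below.  */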
23803 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23805 rtx p_src, p_dst;
23806 int i;
23808 for (i = 0; i < nregs; i++)
23810 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23811 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23812 else
23813 p_src = simplify_gen_subreg (reg_mode, src, mode,
23814 i * reg_mode_size);
23816 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23817 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23818 else
23819 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23820 i * reg_mode_size);
23822 emit_insn (gen_rtx_SET (p_dst, p_src));
23825 return;
23828 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23830 /* Move register range backwards, if we might have destructive
23831 overlap. */
23832 int i;
23833 for (i = nregs - 1; i >= 0; i--)
23834 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23835 i * reg_mode_size),
23836 simplify_gen_subreg (reg_mode, src, mode,
23837 i * reg_mode_size)));
23839 else
23841 int i;
23842 int j = -1;
23843 bool used_update = false;
23844 rtx restore_basereg = NULL_RTX;
23846 if (MEM_P (src) && INT_REGNO_P (reg))
23848 rtx breg;
23850 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23851 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23853 rtx delta_rtx;
23854 breg = XEXP (XEXP (src, 0), 0);
23855 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23856 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23857 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23858 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23859 src = replace_equiv_address (src, breg);
23861 else if (! rs6000_offsettable_memref_p (src, reg_mode))
23863 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23865 rtx basereg = XEXP (XEXP (src, 0), 0);
23866 if (TARGET_UPDATE)
23868 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23869 emit_insn (gen_rtx_SET (ndst,
23870 gen_rtx_MEM (reg_mode,
23871 XEXP (src, 0))));
23872 used_update = true;
23874 else
23875 emit_insn (gen_rtx_SET (basereg,
23876 XEXP (XEXP (src, 0), 1)));
23877 src = replace_equiv_address (src, basereg);
23879 else
23881 rtx basereg = gen_rtx_REG (Pmode, reg);
23882 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23883 src = replace_equiv_address (src, basereg);
23887 breg = XEXP (src, 0);
23888 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23889 breg = XEXP (breg, 0);
23891 /* If the base register we are using to address memory is
23892 also a destination reg, then change that register last. */
23893 if (REG_P (breg)
23894 && REGNO (breg) >= REGNO (dst)
23895 && REGNO (breg) < REGNO (dst) + nregs)
23896 j = REGNO (breg) - REGNO (dst);
23898 else if (MEM_P (dst) && INT_REGNO_P (reg))
23900 rtx breg;
23902 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23903 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23905 rtx delta_rtx;
23906 breg = XEXP (XEXP (dst, 0), 0);
23907 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23908 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23909 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23911 /* We have to update the breg before doing the store.
23912 Use store with update, if available. */
23914 if (TARGET_UPDATE)
23916 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23917 emit_insn (TARGET_32BIT
23918 ? (TARGET_POWERPC64
23919 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23920 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23921 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23922 used_update = true;
23924 else
23925 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23926 dst = replace_equiv_address (dst, breg);
23928 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
23929 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23931 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23933 rtx basereg = XEXP (XEXP (dst, 0), 0);
23934 if (TARGET_UPDATE)
23936 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23937 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23938 XEXP (dst, 0)),
23939 nsrc));
23940 used_update = true;
23942 else
23943 emit_insn (gen_rtx_SET (basereg,
23944 XEXP (XEXP (dst, 0), 1)));
23945 dst = replace_equiv_address (dst, basereg);
23947 else
23949 rtx basereg = XEXP (XEXP (dst, 0), 0);
23950 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23951 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23952 && REG_P (basereg)
23953 && REG_P (offsetreg)
23954 && REGNO (basereg) != REGNO (offsetreg));
23955 if (REGNO (basereg) == 0)
23957 rtx tmp = offsetreg;
23958 offsetreg = basereg;
23959 basereg = tmp;
23961 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23962 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23963 dst = replace_equiv_address (dst, basereg);
23966 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23967 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
23970 for (i = 0; i < nregs; i++)
23972 /* Calculate index to next subword. */
23973 ++j;
23974 if (j == nregs)
23975 j = 0;
23977 /* If the compiler already emitted the move of the first word by
23978 store with update, there is no need to do anything. */
23979 if (j == 0 && used_update)
23980 continue;
23982 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23983 j * reg_mode_size),
23984 simplify_gen_subreg (reg_mode, src, mode,
23985 j * reg_mode_size)));
23987 if (restore_basereg != NULL_RTX)
23988 emit_insn (restore_basereg);
23993 /* This page contains routines that are used to determine what the
23994 function prologue and epilogue code will do and write them out. */
23996 /* Determine whether the REG is really used. */
23998 static bool
23999 save_reg_p (int reg)
24001 /* We need to mark the PIC offset register live under the same conditions
24002 as it is set up; otherwise it won't be saved before we clobber it. */
24004 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24006 /* When calling eh_return, we must return true for all the cases
24007 where conditional_register_usage marks the PIC offset reg
24008 call used. */
24009 if (TARGET_TOC && TARGET_MINIMAL_TOC
24010 && (crtl->calls_eh_return
24011 || df_regs_ever_live_p (reg)
24012 || !constant_pool_empty_p ()))
24013 return true;
24015 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
24016 && flag_pic)
24017 return true;
24020 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
24023 /* Return the first fixed-point register that is required to be
24024 saved. 32 if none. */
24026 static int
24027 first_reg_to_save (void)
24029 int first_reg;
24031 /* Find lowest numbered live register. */
24032 for (first_reg = 13; first_reg <= 31; first_reg++)
24033 if (save_reg_p (first_reg))
24034 break;
24036 #if TARGET_MACHO
24037 if (flag_pic
24038 && crtl->uses_pic_offset_table
24039 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
24040 return RS6000_PIC_OFFSET_TABLE_REGNUM;
24041 #endif
24043 return first_reg;
24046 /* Similar, for FP regs. */
24048 static int
24049 first_fp_reg_to_save (void)
24051 int first_reg;
24053 /* Find lowest numbered live register. */
24054 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24055 if (save_reg_p (first_reg))
24056 break;
24058 return first_reg;
24061 /* Similar, for AltiVec regs. */
24063 static int
24064 first_altivec_reg_to_save (void)
24066 int i;
24068 /* Stack frame remains as is unless we are in the AltiVec ABI. */
24069 if (! TARGET_ALTIVEC_ABI)
24070 return LAST_ALTIVEC_REGNO + 1;
24072 /* On Darwin, the unwind routines are compiled without
24073 TARGET_ALTIVEC, and use save_world to save/restore the
24074 altivec registers when necessary. */
24075 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24076 && ! TARGET_ALTIVEC)
24077 return FIRST_ALTIVEC_REGNO + 20;
24079 /* Find lowest numbered live register. */
24080 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24081 if (save_reg_p (i))
24082 break;
24084 return i;
24087 /* Return a 32-bit mask of the AltiVec registers we need to set in
24088 VRSAVE. Bit n of the return value is 1 if Vn is live, where bits
24089 are numbered from the MSB of the 32-bit word (i.e. the MSB is bit 0). */
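/* For example (illustrative, assuming ALTIVEC_REG_BIT numbers bits from
   the MSB): if V20 and V21 are the only live AltiVec registers, the mask
   is (0x80000000 >> 20) | (0x80000000 >> 21) == 0x00000c00.  */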
24091 static unsigned int
24092 compute_vrsave_mask (void)
24094 unsigned int i, mask = 0;
24096 /* On Darwin, the unwind routines are compiled without
24097 TARGET_ALTIVEC, and use save_world to save/restore the
24098 call-saved altivec registers when necessary. */
24099 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24100 && ! TARGET_ALTIVEC)
24101 mask |= 0xFFF;
24103 /* First, find out if we use _any_ altivec registers. */
24104 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24105 if (df_regs_ever_live_p (i))
24106 mask |= ALTIVEC_REG_BIT (i);
24108 if (mask == 0)
24109 return mask;
24111 /* Next, remove the argument registers from the set. These must
24112 be in the VRSAVE mask set by the caller, so we don't need to add
24113 them in again. More importantly, the mask we compute here is
24114 used to generate CLOBBERs in the set_vrsave insn, and we do not
24115 wish the argument registers to die. */
24116 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24117 mask &= ~ALTIVEC_REG_BIT (i);
24119 /* Similarly, remove the return value from the set. */
24121 bool yes = false;
24122 diddle_return_value (is_altivec_return_reg, &yes);
24123 if (yes)
24124 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24127 return mask;
24130 /* For a very restricted set of circumstances, we can cut down the
24131 size of prologues/epilogues by calling our own save/restore-the-world
24132 routines. */
24134 static void
24135 compute_save_world_info (rs6000_stack_t *info)
24137 info->world_save_p = 1;
24138 info->world_save_p
24139 = (WORLD_SAVE_P (info)
24140 && DEFAULT_ABI == ABI_DARWIN
24141 && !cfun->has_nonlocal_label
24142 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24143 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24144 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24145 && info->cr_save_p);
24147 /* This will not work in conjunction with sibcalls. Make sure there
24148 are none. (This check is expensive, but seldom executed.) */
24149 if (WORLD_SAVE_P (info))
24151 rtx_insn *insn;
24152 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24153 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24155 info->world_save_p = 0;
24156 break;
24160 if (WORLD_SAVE_P (info))
24162 /* Even if we're not touching VRsave, make sure there's room on the
24163 stack for it, if it looks like we're calling SAVE_WORLD, which
24164 will attempt to save it. */
24165 info->vrsave_size = 4;
24167 /* If we are going to save the world, we need to save the link register too. */
24168 info->lr_save_p = 1;
24170 /* "Save" the VRsave register too if we're saving the world. */
24171 if (info->vrsave_mask == 0)
24172 info->vrsave_mask = compute_vrsave_mask ();
24174 /* Because the Darwin register save/restore routines only handle
24175 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24176 check. */
24177 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24178 && (info->first_altivec_reg_save
24179 >= FIRST_SAVED_ALTIVEC_REGNO));
24182 return;
24186 static void
24187 is_altivec_return_reg (rtx reg, void *xyes)
24189 bool *yes = (bool *) xyes;
24190 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24191 *yes = true;
24195 /* Return whether REG is a global user reg or has been specified by
24196 -ffixed-REG. We should not restore these, and so cannot use
24197 lmw or out-of-line restore functions if there are any. We also
24198 can't save them (well, emit frame notes for them), because frame
24199 unwinding during exception handling will restore saved registers. */
24201 static bool
24202 fixed_reg_p (int reg)
24204 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24205 backend sets it, overriding anything the user might have given. */
24206 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24207 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24208 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24209 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24210 return false;
24212 return fixed_regs[reg];
24215 /* Determine the strategy for saving/restoring registers. */
24217 enum {
24218 SAVE_MULTIPLE = 0x1,
24219 SAVE_INLINE_GPRS = 0x2,
24220 SAVE_INLINE_FPRS = 0x4,
24221 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24222 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24223 SAVE_INLINE_VRS = 0x20,
24224 REST_MULTIPLE = 0x100,
24225 REST_INLINE_GPRS = 0x200,
24226 REST_INLINE_FPRS = 0x400,
24227 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24228 REST_INLINE_VRS = 0x1000
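/* For example (an illustrative combination): a 32-bit V.4 leaf function
   compiled at -Os with -mmultiple that saves several GPRs but no FPRs or
   VRs might end up with SAVE_INLINE_GPRS | SAVE_MULTIPLE and
   REST_INLINE_GPRS | REST_MULTIPLE (plus the inline FPR/VR bits), i.e.
   one stmw in the prologue and one lmw in the epilogue.  */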
24231 static int
24232 rs6000_savres_strategy (rs6000_stack_t *info,
24233 bool using_static_chain_p)
24235 int strategy = 0;
24237 /* Select between in-line and out-of-line save and restore of regs.
24238 First, all the obvious cases where we don't use out-of-line. */
24239 if (crtl->calls_eh_return
24240 || cfun->machine->ra_need_lr)
24241 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24242 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24243 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24245 if (info->first_gp_reg_save == 32)
24246 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24248 if (info->first_fp_reg_save == 64
24249 /* The out-of-line FP routines use double-precision stores;
24250 we can't use those routines if we don't have such stores. */
24251 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
24252 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24254 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24255 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24257 /* Define cutoff for using out-of-line functions to save registers. */
24258 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24260 if (!optimize_size)
24262 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24263 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24264 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24266 else
24268 /* Prefer out-of-line restore if it will exit. */
24269 if (info->first_fp_reg_save > 61)
24270 strategy |= SAVE_INLINE_FPRS;
24271 if (info->first_gp_reg_save > 29)
24273 if (info->first_fp_reg_save == 64)
24274 strategy |= SAVE_INLINE_GPRS;
24275 else
24276 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24278 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24279 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24282 else if (DEFAULT_ABI == ABI_DARWIN)
24284 if (info->first_fp_reg_save > 60)
24285 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24286 if (info->first_gp_reg_save > 29)
24287 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24288 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24290 else
24292 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24293 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24294 || info->first_fp_reg_save > 61)
24295 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24296 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24297 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24300 /* Don't bother to try to save things out-of-line if r11 is occupied
24301 by the static chain. It would require too much fiddling and the
24302 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
24303 pointer on Darwin, and AIX uses r1 or r12. */
24304 if (using_static_chain_p
24305 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24306 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24307 | SAVE_INLINE_GPRS
24308 | SAVE_INLINE_VRS);
24310 /* Don't ever restore fixed regs. That means we can't use the
24311 out-of-line register restore functions if a fixed reg is in the
24312 range of regs restored. */
24313 if (!(strategy & REST_INLINE_FPRS))
24314 for (int i = info->first_fp_reg_save; i < 64; i++)
24315 if (fixed_regs[i])
24317 strategy |= REST_INLINE_FPRS;
24318 break;
24321 /* We can only use the out-of-line routines to restore fprs if we've
24322 saved all the registers from first_fp_reg_save in the prologue.
24323 Otherwise, we risk loading garbage. Of course, if we have saved
24324 out-of-line then we know we haven't skipped any fprs. */
24325 if ((strategy & SAVE_INLINE_FPRS)
24326 && !(strategy & REST_INLINE_FPRS))
24327 for (int i = info->first_fp_reg_save; i < 64; i++)
24328 if (!save_reg_p (i))
24330 strategy |= REST_INLINE_FPRS;
24331 break;
24334 /* Similarly, for altivec regs. */
24335 if (!(strategy & REST_INLINE_VRS))
24336 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24337 if (fixed_regs[i])
24339 strategy |= REST_INLINE_VRS;
24340 break;
24343 if ((strategy & SAVE_INLINE_VRS)
24344 && !(strategy & REST_INLINE_VRS))
24345 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24346 if (!save_reg_p (i))
24348 strategy |= REST_INLINE_VRS;
24349 break;
24352 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24353 saved is an out-of-line save or restore. Set up the value for
24354 the next test (excluding out-of-line gprs). */
24355 bool lr_save_p = (info->lr_save_p
24356 || !(strategy & SAVE_INLINE_FPRS)
24357 || !(strategy & SAVE_INLINE_VRS)
24358 || !(strategy & REST_INLINE_FPRS)
24359 || !(strategy & REST_INLINE_VRS));
24361 if (TARGET_MULTIPLE
24362 && !TARGET_POWERPC64
24363 && info->first_gp_reg_save < 31
24364 && !(flag_shrink_wrap
24365 && flag_shrink_wrap_separate
24366 && optimize_function_for_speed_p (cfun)))
24368 int count = 0;
24369 for (int i = info->first_gp_reg_save; i < 32; i++)
24370 if (save_reg_p (i))
24371 count++;
24373 if (count <= 1)
24374 /* Don't use store multiple if only one reg needs to be
24375 saved. This can occur for example when the ABI_V4 pic reg
24376 (r30) needs to be saved to make calls, but r31 is not
24377 used. */
24378 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24379 else
24381 /* Prefer store multiple for saves over out-of-line
24382 routines, since the store-multiple instruction will
24383 always be smaller. */
24384 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24386 /* The situation is more complicated with load multiple.
24387 We'd prefer to use the out-of-line routines for restores,
24388 since the "exit" out-of-line routines can handle the
24389 restore of LR and the frame teardown. However it doesn't
24390 make sense to use the out-of-line routine if that is the
24391 only reason we'd need to save LR, and we can't use the
24392 "exit" out-of-line gpr restore if we have saved some
24393 fprs; in those cases it is advantageous to use load
24394 multiple when available. */
24395 if (info->first_fp_reg_save != 64 || !lr_save_p)
24396 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24400 /* Using the "exit" out-of-line routine does not improve code size
24401 if using it would require lr to be saved and if only saving one
24402 or two gprs. */
24403 else if (!lr_save_p && info->first_gp_reg_save > 29)
24404 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24406 /* Don't ever restore fixed regs. */
24407 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24408 for (int i = info->first_gp_reg_save; i < 32; i++)
24409 if (fixed_reg_p (i))
24411 strategy |= REST_INLINE_GPRS;
24412 strategy &= ~REST_MULTIPLE;
24413 break;
24416 /* We can only use load multiple or the out-of-line routines to
24417 restore gprs if we've saved all the registers from
24418 first_gp_reg_save. Otherwise, we risk loading garbage.
24419 Of course, if we have saved out-of-line or used stmw then we know
24420 we haven't skipped any gprs. */
24421 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24422 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24423 for (int i = info->first_gp_reg_save; i < 32; i++)
24424 if (!save_reg_p (i))
24426 strategy |= REST_INLINE_GPRS;
24427 strategy &= ~REST_MULTIPLE;
24428 break;
24431 if (TARGET_ELF && TARGET_64BIT)
24433 if (!(strategy & SAVE_INLINE_FPRS))
24434 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24435 else if (!(strategy & SAVE_INLINE_GPRS)
24436 && info->first_fp_reg_save == 64)
24437 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24439 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24440 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24442 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24443 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24445 return strategy;
24448 /* Calculate the stack information for the current function. This is
24449 complicated by having two separate calling sequences, the AIX calling
24450 sequence and the V.4 calling sequence.
24452 AIX (and Darwin/Mac OS X) stack frames look like:
24453 32-bit 64-bit
24454 SP----> +---------------------------------------+
24455 | back chain to caller | 0 0
24456 +---------------------------------------+
24457 | saved CR | 4 8 (8-11)
24458 +---------------------------------------+
24459 | saved LR | 8 16
24460 +---------------------------------------+
24461 | reserved for compilers | 12 24
24462 +---------------------------------------+
24463 | reserved for binders | 16 32
24464 +---------------------------------------+
24465 | saved TOC pointer | 20 40
24466 +---------------------------------------+
24467 | Parameter save area (+padding*) (P) | 24 48
24468 +---------------------------------------+
24469 | Alloca space (A) | 24+P etc.
24470 +---------------------------------------+
24471 | Local variable space (L) | 24+P+A
24472 +---------------------------------------+
24473 | Float/int conversion temporary (X) | 24+P+A+L
24474 +---------------------------------------+
24475 | Save area for AltiVec registers (W) | 24+P+A+L+X
24476 +---------------------------------------+
24477 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24478 +---------------------------------------+
24479 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24480 +---------------------------------------+
24481 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24482 +---------------------------------------+
24483 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24484 +---------------------------------------+
24485 old SP->| back chain to caller's caller |
24486 +---------------------------------------+
24488 * If the alloca area is present, the parameter save area is
24489 padded so that the alloca area starts 16-byte aligned.
24491 The required alignment for AIX configurations is two words (i.e., 8
24492 or 16 bytes).
24494 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24496 SP----> +---------------------------------------+
24497 | Back chain to caller | 0
24498 +---------------------------------------+
24499 | Save area for CR | 8
24500 +---------------------------------------+
24501 | Saved LR | 16
24502 +---------------------------------------+
24503 | Saved TOC pointer | 24
24504 +---------------------------------------+
24505 | Parameter save area (+padding*) (P) | 32
24506 +---------------------------------------+
24507 | Alloca space (A) | 32+P
24508 +---------------------------------------+
24509 | Local variable space (L) | 32+P+A
24510 +---------------------------------------+
24511 | Save area for AltiVec registers (W) | 32+P+A+L
24512 +---------------------------------------+
24513 | AltiVec alignment padding (Y) | 32+P+A+L+W
24514 +---------------------------------------+
24515 | Save area for GP registers (G) | 32+P+A+L+W+Y
24516 +---------------------------------------+
24517 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24518 +---------------------------------------+
24519 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24520 +---------------------------------------+
24522 * If the alloca area is present, the parameter save area is
24523 padded so that the alloca area starts 16-byte aligned.
24525 V.4 stack frames look like:
24527 SP----> +---------------------------------------+
24528 | back chain to caller | 0
24529 +---------------------------------------+
24530 | caller's saved LR | 4
24531 +---------------------------------------+
24532 | Parameter save area (+padding*) (P) | 8
24533 +---------------------------------------+
24534 | Alloca space (A) | 8+P
24535 +---------------------------------------+
24536 | Varargs save area (V) | 8+P+A
24537 +---------------------------------------+
24538 | Local variable space (L) | 8+P+A+V
24539 +---------------------------------------+
24540 | Float/int conversion temporary (X) | 8+P+A+V+L
24541 +---------------------------------------+
24542 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24543 +---------------------------------------+
24544 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24545 +---------------------------------------+
24546 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24547 +---------------------------------------+
24548 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24549 +---------------------------------------+
24550 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24551 +---------------------------------------+
24552 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24553 +---------------------------------------+
24554 old SP->| back chain to caller's caller |
24555 +---------------------------------------+
24557 * If the alloca area is present and the required alignment is
24558 16 bytes, the parameter save area is padded so that the
24559 alloca area starts 16-byte aligned.
24561 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24562 given. (But note below and in sysv4.h that we require only 8 and
24563 may round up the size of our stack frame anyway. The historical
24564 reason is early versions of powerpc-linux which didn't properly
24565 align the stack at program startup. A happy side-effect is that
24566 -mno-eabi libraries can be used with -meabi programs.)
24568 The EABI configuration defaults to the V.4 layout. However,
24569 the stack alignment requirements may differ. If -mno-eabi is not
24570 given, the required stack alignment is 8 bytes; if -mno-eabi is
24571 given, the required alignment is 16 bytes. (But see V.4 comment
24572 above.) */
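/* As a worked example (illustrative): in the ELFv2 layout above, a
   function with P = 64 bytes of parameter save area, no alloca (A = 0)
   and L = 32 bytes of locals places the AltiVec save area (W) at offset
   32 + 64 + 0 + 32 = 128 bytes above the outgoing SP.  */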
24574 #ifndef ABI_STACK_BOUNDARY
24575 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24576 #endif
24578 static rs6000_stack_t *
24579 rs6000_stack_info (void)
24581 /* We should never be called for thunks; we are not set up for that. */
24582 gcc_assert (!cfun->is_thunk);
24584 rs6000_stack_t *info = &stack_info;
24585 int reg_size = TARGET_32BIT ? 4 : 8;
24586 int ehrd_size;
24587 int ehcr_size;
24588 int save_align;
24589 int first_gp;
24590 HOST_WIDE_INT non_fixed_size;
24591 bool using_static_chain_p;
24593 if (reload_completed && info->reload_completed)
24594 return info;
24596 memset (info, 0, sizeof (*info));
24597 info->reload_completed = reload_completed;
24599 /* Select which calling sequence. */
24600 info->abi = DEFAULT_ABI;
24602 /* Calculate which registers need to be saved & save area size. */
24603 info->first_gp_reg_save = first_reg_to_save ();
24604 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24605 even if it currently looks like we won't. Reload may need it to
24606 get at a constant; if so, it will have already created a constant
24607 pool entry for it. */
24608 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24609 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24610 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24611 && crtl->uses_const_pool
24612 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24613 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24614 else
24615 first_gp = info->first_gp_reg_save;
24617 info->gp_size = reg_size * (32 - first_gp);
24619 info->first_fp_reg_save = first_fp_reg_to_save ();
24620 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24622 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24623 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24624 - info->first_altivec_reg_save);
24626 /* Does this function call anything? */
24627 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24629 /* Determine if we need to save the condition code registers. */
24630 if (save_reg_p (CR2_REGNO)
24631 || save_reg_p (CR3_REGNO)
24632 || save_reg_p (CR4_REGNO))
24634 info->cr_save_p = 1;
24635 if (DEFAULT_ABI == ABI_V4)
24636 info->cr_size = reg_size;
24639 /* If the current function calls __builtin_eh_return, then we need
24640 to allocate stack space for registers that will hold data for
24641 the exception handler. */
24642 if (crtl->calls_eh_return)
24644 unsigned int i;
24645 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24646 continue;
24648 ehrd_size = i * UNITS_PER_WORD;
24650 else
24651 ehrd_size = 0;
24653 /* In the ELFv2 ABI, we also need to allocate space for separate
24654 CR field save areas if the function calls __builtin_eh_return. */
24655 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24657 /* This hard-codes that we have three call-saved CR fields. */
24658 ehcr_size = 3 * reg_size;
24659 /* We do *not* use the regular CR save mechanism. */
24660 info->cr_save_p = 0;
24662 else
24663 ehcr_size = 0;
24665 /* Determine various sizes. */
24666 info->reg_size = reg_size;
24667 info->fixed_size = RS6000_SAVE_AREA;
24668 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24669 if (cfun->calls_alloca)
24670 info->parm_size =
24671 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24672 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24673 else
24674 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24675 TARGET_ALTIVEC ? 16 : 8);
24676 if (FRAME_GROWS_DOWNWARD)
24677 info->vars_size
24678 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24679 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24680 - (info->fixed_size + info->vars_size + info->parm_size);
24682 if (TARGET_ALTIVEC_ABI)
24683 info->vrsave_mask = compute_vrsave_mask ();
24685 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24686 info->vrsave_size = 4;
24688 compute_save_world_info (info);
24690 /* Calculate the offsets. */
24691 switch (DEFAULT_ABI)
24693 case ABI_NONE:
24694 default:
24695 gcc_unreachable ();
24697 case ABI_AIX:
24698 case ABI_ELFv2:
24699 case ABI_DARWIN:
24700 info->fp_save_offset = -info->fp_size;
24701 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24703 if (TARGET_ALTIVEC_ABI)
24705 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24707 /* Align stack so vector save area is on a quadword boundary.
24708 The padding goes above the vectors. */
24709 if (info->altivec_size != 0)
24710 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24712 info->altivec_save_offset = info->vrsave_save_offset
24713 - info->altivec_padding_size
24714 - info->altivec_size;
24715 gcc_assert (info->altivec_size == 0
24716 || info->altivec_save_offset % 16 == 0);
24718 /* Adjust for AltiVec case. */
24719 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24721 else
24722 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24724 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24725 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24726 info->lr_save_offset = 2*reg_size;
24727 break;
24729 case ABI_V4:
24730 info->fp_save_offset = -info->fp_size;
24731 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24732 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24734 if (TARGET_ALTIVEC_ABI)
24736 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24738 /* Align stack so vector save area is on a quadword boundary. */
24739 if (info->altivec_size != 0)
24740 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24742 info->altivec_save_offset = info->vrsave_save_offset
24743 - info->altivec_padding_size
24744 - info->altivec_size;
24746 /* Adjust for AltiVec case. */
24747 info->ehrd_offset = info->altivec_save_offset;
24749 else
24750 info->ehrd_offset = info->cr_save_offset;
24752 info->ehrd_offset -= ehrd_size;
24753 info->lr_save_offset = reg_size;
24756 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24757 info->save_size = RS6000_ALIGN (info->fp_size
24758 + info->gp_size
24759 + info->altivec_size
24760 + info->altivec_padding_size
24761 + ehrd_size
24762 + ehcr_size
24763 + info->cr_size
24764 + info->vrsave_size,
24765 save_align);
24767 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24769 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24770 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24772 /* Determine if we need to save the link register. */
24773 if (info->calls_p
24774 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24775 && crtl->profile
24776 && !TARGET_PROFILE_KERNEL)
24777 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24778 #ifdef TARGET_RELOCATABLE
24779 || (DEFAULT_ABI == ABI_V4
24780 && (TARGET_RELOCATABLE || flag_pic > 1)
24781 && !constant_pool_empty_p ())
24782 #endif
24783 || rs6000_ra_ever_killed ())
24784 info->lr_save_p = 1;
24786 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24787 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24788 && call_used_regs[STATIC_CHAIN_REGNUM]);
24789 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24791 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24792 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24793 || !(info->savres_strategy & SAVE_INLINE_VRS)
24794 || !(info->savres_strategy & REST_INLINE_GPRS)
24795 || !(info->savres_strategy & REST_INLINE_FPRS)
24796 || !(info->savres_strategy & REST_INLINE_VRS))
24797 info->lr_save_p = 1;
24799 if (info->lr_save_p)
24800 df_set_regs_ever_live (LR_REGNO, true);
24802 /* Determine if we need to allocate any stack frame:
24804 For AIX we need to push the stack if a frame pointer is needed
24805 (because the stack might be dynamically adjusted), if we are
24806 debugging, if we make calls, or if the sum of fp_save, gp_save,
24807 and local variables are more than the space needed to save all
24808 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24809 + 18*8 = 288 (GPR13 reserved).
24811 For V.4 we don't have the stack cushion that AIX uses, but assume
24812 that the debugger can handle stackless frames. */
24814 if (info->calls_p)
24815 info->push_p = 1;
24817 else if (DEFAULT_ABI == ABI_V4)
24818 info->push_p = non_fixed_size != 0;
24820 else if (frame_pointer_needed)
24821 info->push_p = 1;
24823 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24824 info->push_p = 1;
24826 else
24827 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24829 return info;
24832 static void
24833 debug_stack_info (rs6000_stack_t *info)
24835 const char *abi_string;
24837 if (! info)
24838 info = rs6000_stack_info ();
24840 fprintf (stderr, "\nStack information for function %s:\n",
24841 ((current_function_decl && DECL_NAME (current_function_decl))
24842 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24843 : "<unknown>"));
24845 switch (info->abi)
24847 default: abi_string = "Unknown"; break;
24848 case ABI_NONE: abi_string = "NONE"; break;
24849 case ABI_AIX: abi_string = "AIX"; break;
24850 case ABI_ELFv2: abi_string = "ELFv2"; break;
24851 case ABI_DARWIN: abi_string = "Darwin"; break;
24852 case ABI_V4: abi_string = "V.4"; break;
24855 fprintf (stderr, "\tABI = %5s\n", abi_string);
24857 if (TARGET_ALTIVEC_ABI)
24858 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24860 if (info->first_gp_reg_save != 32)
24861 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24863 if (info->first_fp_reg_save != 64)
24864 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24866 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24867 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24868 info->first_altivec_reg_save);
24870 if (info->lr_save_p)
24871 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24873 if (info->cr_save_p)
24874 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24876 if (info->vrsave_mask)
24877 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24879 if (info->push_p)
24880 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24882 if (info->calls_p)
24883 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24885 if (info->gp_size)
24886 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24888 if (info->fp_size)
24889 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24891 if (info->altivec_size)
24892 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24893 info->altivec_save_offset);
24895 if (info->vrsave_size)
24896 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24897 info->vrsave_save_offset);
24899 if (info->lr_save_p)
24900 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24902 if (info->cr_save_p)
24903 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24905 if (info->varargs_save_offset)
24906 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24908 if (info->total_size)
24909 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24910 info->total_size);
24912 if (info->vars_size)
24913 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24914 info->vars_size);
24916 if (info->parm_size)
24917 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24919 if (info->fixed_size)
24920 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24922 if (info->gp_size)
24923 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24925 if (info->fp_size)
24926 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24928 if (info->altivec_size)
24929 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24931 if (info->vrsave_size)
24932 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24934 if (info->altivec_padding_size)
24935 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24936 info->altivec_padding_size);
24938 if (info->cr_size)
24939 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24941 if (info->save_size)
24942 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24944 if (info->reg_size != 4)
24945 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24947 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24949 fprintf (stderr, "\n");
24952 rtx
24953 rs6000_return_addr (int count, rtx frame)
24955 /* Currently we don't optimize very well between prologue and body
24956 code, and for PIC code the result can actually be quite bad, so
24957 don't try to be too clever here. */
24958 if (count != 0
24959 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24961 cfun->machine->ra_needs_full_frame = 1;
24963 return
24964 gen_rtx_MEM
24965 (Pmode,
24966 memory_address
24967 (Pmode,
24968 plus_constant (Pmode,
24969 copy_to_reg
24970 (gen_rtx_MEM (Pmode,
24971 memory_address (Pmode, frame))),
24972 RETURN_ADDRESS_OFFSET)));
24975 cfun->machine->ra_need_lr = 1;
24976 return get_hard_reg_initial_val (Pmode, LR_REGNO);
24979 /* Say whether a function is a candidate for sibcall handling or not. */
24981 static bool
24982 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24984 tree fntype;
24986 if (decl)
24987 fntype = TREE_TYPE (decl);
24988 else
24989 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24991 /* We can't do it if the called function has more vector parameters
24992 than the current function; there's nowhere to put the VRsave code. */
24993 if (TARGET_ALTIVEC_ABI
24994 && TARGET_ALTIVEC_VRSAVE
24995 && !(decl && decl == current_function_decl))
24997 function_args_iterator args_iter;
24998 tree type;
24999 int nvreg = 0;
25001 /* Functions with vector parameters are required to have a
25002 prototype, so the argument type info must be available
25003 here. */
25004 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
25005 if (TREE_CODE (type) == VECTOR_TYPE
25006 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25007 nvreg++;
25009 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
25010 if (TREE_CODE (type) == VECTOR_TYPE
25011 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25012 nvreg--;
25014 if (nvreg > 0)
25015 return false;
25018 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25019 functions, because the callee may have a different TOC pointer to
25020 the caller and there's no way to ensure we restore the TOC when
25021 we return. With the secure-plt SYSV ABI we can't make non-local
25022 calls when -fpic/PIC because the plt call stubs use r30. */
25023 if (DEFAULT_ABI == ABI_DARWIN
25024 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25025 && decl
25026 && !DECL_EXTERNAL (decl)
25027 && !DECL_WEAK (decl)
25028 && (*targetm.binds_local_p) (decl))
25029 || (DEFAULT_ABI == ABI_V4
25030 && (!TARGET_SECURE_PLT
25031 || !flag_pic
25032 || (decl
25033 && (*targetm.binds_local_p) (decl)))))
25035 tree attr_list = TYPE_ATTRIBUTES (fntype);
25037 if (!lookup_attribute ("longcall", attr_list)
25038 || lookup_attribute ("shortcall", attr_list))
25039 return true;
25042 return false;
25045 static int
25046 rs6000_ra_ever_killed (void)
25048 rtx_insn *top;
25049 rtx reg;
25050 rtx_insn *insn;
25052 if (cfun->is_thunk)
25053 return 0;
25055 if (cfun->machine->lr_save_state)
25056 return cfun->machine->lr_save_state - 1;
25058 /* regs_ever_live has LR marked as used if any sibcalls are present,
25059 but this should not force saving and restoring in the
25060 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25061 clobbers LR, so that is inappropriate. */
25063 /* Also, the prologue can generate a store into LR that
25064 doesn't really count, like this:
25066 move LR->R0
25067 bcl to set PIC register
25068 move LR->R31
25069 move R0->LR
25071 When we're called from the epilogue, we need to avoid counting
25072 this as a store. */
25074 push_topmost_sequence ();
25075 top = get_insns ();
25076 pop_topmost_sequence ();
25077 reg = gen_rtx_REG (Pmode, LR_REGNO);
25079 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25081 if (INSN_P (insn))
25083 if (CALL_P (insn))
25085 if (!SIBLING_CALL_P (insn))
25086 return 1;
25088 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25089 return 1;
25090 else if (set_of (reg, insn) != NULL_RTX
25091 && !prologue_epilogue_contains (insn))
25092 return 1;
25095 return 0;
25098 /* Emit instructions needed to load the TOC register.
25099 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
25100 and there is a constant pool; or for SVR4 -fpic. */
25102 void
25103 rs6000_emit_load_toc_table (int fromprolog)
25105 rtx dest;
25106 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25108 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25110 char buf[30];
25111 rtx lab, tmp1, tmp2, got;
25113 lab = gen_label_rtx ();
25114 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25115 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25116 if (flag_pic == 2)
25118 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25119 need_toc_init = 1;
25121 else
25122 got = rs6000_got_sym ();
25123 tmp1 = tmp2 = dest;
25124 if (!fromprolog)
25126 tmp1 = gen_reg_rtx (Pmode);
25127 tmp2 = gen_reg_rtx (Pmode);
25129 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25130 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25131 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25132 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25134 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25136 emit_insn (gen_load_toc_v4_pic_si ());
25137 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25139 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25141 char buf[30];
25142 rtx temp0 = (fromprolog
25143 ? gen_rtx_REG (Pmode, 0)
25144 : gen_reg_rtx (Pmode));
25146 if (fromprolog)
25148 rtx symF, symL;
25150 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25151 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25153 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25154 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25156 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25157 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25158 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25160 else
25162 rtx tocsym, lab;
25164 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25165 need_toc_init = 1;
25166 lab = gen_label_rtx ();
25167 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25168 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25169 if (TARGET_LINK_STACK)
25170 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25171 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25173 emit_insn (gen_addsi3 (dest, temp0, dest));
25175 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25177 /* This is for AIX code running in non-PIC ELF32. */
25178 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25180 need_toc_init = 1;
25181 emit_insn (gen_elf_high (dest, realsym));
25182 emit_insn (gen_elf_low (dest, dest, realsym));
25184 else
25186 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25188 if (TARGET_32BIT)
25189 emit_insn (gen_load_toc_aix_si (dest));
25190 else
25191 emit_insn (gen_load_toc_aix_di (dest));
25195 /* Emit instructions to restore the link register after determining where
25196 its value has been stored. */
25198 void
25199 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25201 rs6000_stack_t *info = rs6000_stack_info ();
25202 rtx operands[2];
25204 operands[0] = source;
25205 operands[1] = scratch;
25207 if (info->lr_save_p)
25209 rtx frame_rtx = stack_pointer_rtx;
25210 HOST_WIDE_INT sp_offset = 0;
25211 rtx tmp;
25213 if (frame_pointer_needed
25214 || cfun->calls_alloca
25215 || info->total_size > 32767)
25217 tmp = gen_frame_mem (Pmode, frame_rtx);
25218 emit_move_insn (operands[1], tmp);
25219 frame_rtx = operands[1];
25221 else if (info->push_p)
25222 sp_offset = info->total_size;
25224 tmp = plus_constant (Pmode, frame_rtx,
25225 info->lr_save_offset + sp_offset);
25226 tmp = gen_frame_mem (Pmode, tmp);
25227 emit_move_insn (tmp, operands[0]);
25229 else
25230 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25232 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25233 state of lr_save_p so any change from here on would be a bug. In
25234 particular, stop rs6000_ra_ever_killed from considering the SET
25235 of lr we may have added just above. */
25236 cfun->machine->lr_save_state = info->lr_save_p + 1;
25239 static GTY(()) alias_set_type set = -1;
25241 alias_set_type
25242 get_TOC_alias_set (void)
25244 if (set == -1)
25245 set = new_alias_set ();
25246 return set;
25249 /* This returns nonzero if the current function uses the TOC. This is
25250 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25251 is generated by the ABI_V4 load_toc_* patterns. */
25252 #if TARGET_ELF
25253 static int
25254 uses_TOC (void)
25256 rtx_insn *insn;
25258 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25259 if (INSN_P (insn))
25261 rtx pat = PATTERN (insn);
25262 int i;
25264 if (GET_CODE (pat) == PARALLEL)
25265 for (i = 0; i < XVECLEN (pat, 0); i++)
25267 rtx sub = XVECEXP (pat, 0, i);
25268 if (GET_CODE (sub) == USE)
25270 sub = XEXP (sub, 0);
25271 if (GET_CODE (sub) == UNSPEC
25272 && XINT (sub, 1) == UNSPEC_TOC)
25273 return 1;
25277 return 0;
25279 #endif
25281 rtx
25282 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25284 rtx tocrel, tocreg, hi;
25286 if (TARGET_DEBUG_ADDR)
25288 if (GET_CODE (symbol) == SYMBOL_REF)
25289 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25290 XSTR (symbol, 0));
25291 else
25293 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25294 GET_RTX_NAME (GET_CODE (symbol)));
25295 debug_rtx (symbol);
25299 if (!can_create_pseudo_p ())
25300 df_set_regs_ever_live (TOC_REGISTER, true);
25302 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25303 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25304 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25305 return tocrel;
25307 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25308 if (largetoc_reg != NULL)
25310 emit_move_insn (largetoc_reg, hi);
25311 hi = largetoc_reg;
25313 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
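/* For illustration (a sketch): under -mcmodel=medium or -mcmodel=large
   the HIGH/LO_SUM pair built above typically assembles to

        addis rT,r2,sym@toc@ha
        addi  rD,rT,sym@toc@l   # or folded into a D-form load/store

   rather than a single TOC-relative access.  */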
25316 /* Issue assembly directives that create a reference to the given DWARF
25317 FRAME_TABLE_LABEL from the current function section. */
25318 void
25319 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25321 fprintf (asm_out_file, "\t.ref %s\n",
25322 (* targetm.strip_name_encoding) (frame_table_label));
25325 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25326 and the change to the stack pointer. */
25328 static void
25329 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25331 rtvec p;
25332 int i;
25333 rtx regs[3];
25335 i = 0;
25336 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25337 if (hard_frame_needed)
25338 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25339 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25340 || (hard_frame_needed
25341 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25342 regs[i++] = fp;
25344 p = rtvec_alloc (i);
25345 while (--i >= 0)
25347 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25348 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25351 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25354 /* Emit the correct code for allocating stack space, as insns.
25355 If COPY_REG, make sure a copy of the old frame is left there.
25356 The generated code may use hard register 0 as a temporary. */
25358 static rtx_insn *
25359 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25361 rtx_insn *insn;
25362 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25363 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25364 rtx todec = gen_int_mode (-size, Pmode);
25365 rtx par, set, mem;
25367 if (INTVAL (todec) != -size)
25369 warning (0, "stack frame too large");
25370 emit_insn (gen_trap ());
25371 return 0;
25374 if (crtl->limit_stack)
25376 if (REG_P (stack_limit_rtx)
25377 && REGNO (stack_limit_rtx) > 1
25378 && REGNO (stack_limit_rtx) <= 31)
25380 rtx_insn *insn
25381 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25382 gcc_assert (insn);
25383 emit_insn (insn);
25384 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25386 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25387 && TARGET_32BIT
25388 && DEFAULT_ABI == ABI_V4
25389 && !flag_pic)
25391 rtx toload = gen_rtx_CONST (VOIDmode,
25392 gen_rtx_PLUS (Pmode,
25393 stack_limit_rtx,
25394 GEN_INT (size)));
25396 emit_insn (gen_elf_high (tmp_reg, toload));
25397 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25398 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25399 const0_rtx));
25401 else
25402 warning (0, "stack limit expression is not supported");
25405 if (copy_reg)
25407 if (copy_off != 0)
25408 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25409 else
25410 emit_move_insn (copy_reg, stack_reg);
25413 if (size > 32767)
25415 /* Need a note here so that try_split doesn't get confused. */
25416 if (get_last_insn () == NULL_RTX)
25417 emit_note (NOTE_INSN_DELETED);
25418 insn = emit_move_insn (tmp_reg, todec);
25419 try_split (PATTERN (insn), insn, 0);
25420 todec = tmp_reg;
25423 insn = emit_insn (TARGET_32BIT
25424 ? gen_movsi_update_stack (stack_reg, stack_reg,
25425 todec, stack_reg)
25426 : gen_movdi_di_update_stack (stack_reg, stack_reg,
25427 todec, stack_reg));
25428 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25429 it now and set the alias set/attributes. The above gen_*_update
25430 calls will generate a PARALLEL with the MEM set being the first
25431 operation. */
25432 par = PATTERN (insn);
25433 gcc_assert (GET_CODE (par) == PARALLEL);
25434 set = XVECEXP (par, 0, 0);
25435 gcc_assert (GET_CODE (set) == SET);
25436 mem = SET_DEST (set);
25437 gcc_assert (MEM_P (mem));
25438 MEM_NOTRAP_P (mem) = 1;
25439 set_mem_alias_set (mem, get_frame_alias_set ());
25441 RTX_FRAME_RELATED_P (insn) = 1;
25442 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25443 gen_rtx_SET (stack_reg, gen_rtx_PLUS (Pmode, stack_reg,
25444 GEN_INT (-size))));
25445 return insn;
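/* For illustration only: for a small 64-bit frame the insn emitted above
   assembles to a single atomic allocate-and-link,

       stdu 1,-size(1)

   while for size > 32767 the constant goes through r0 (tmp_reg) first,
   roughly

       lis   0,-size@ha
       addi  0,0,-size@l
       stdux 1,1,0

   (stwu/stwux for -m32).  The exact sequence depends on how the move of
   TODEC gets split.  */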
25448 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25450 #if PROBE_INTERVAL > 32768
25451 #error Cannot use indexed addressing mode for stack probing
25452 #endif
25454 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25455 inclusive. These are offsets from the current stack pointer. */
25457 static void
25458 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25460 /* See if we have a constant small number of probes to generate. If so,
25461 that's the easy case. */
25462 if (first + size <= 32768)
25464 HOST_WIDE_INT i;
25466 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25467 it exceeds SIZE. If only one probe is needed, this will not
25468 generate any code. Then probe at FIRST + SIZE. */
25469 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25470 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25471 -(first + i)));
25473 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25474 -(first + size)));
25477 /* Otherwise, do the same as above, but in a loop. Note that we must be
25478 extra careful with variables wrapping around because we might be at
25479 the very top (or the very bottom) of the address space and we have
25480 to be able to handle this case properly; in particular, we use an
25481 equality test for the loop condition. */
25482 else
25484 HOST_WIDE_INT rounded_size;
25485 rtx r12 = gen_rtx_REG (Pmode, 12);
25486 rtx r0 = gen_rtx_REG (Pmode, 0);
25488 /* Sanity check for the addressing mode we're going to use. */
25489 gcc_assert (first <= 32768);
25491 /* Step 1: round SIZE to the previous multiple of the interval. */
25493 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25496 /* Step 2: compute initial and final value of the loop counter. */
25498 /* TEST_ADDR = SP + FIRST. */
25499 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25500 -first)));
25502 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25503 if (rounded_size > 32768)
25505 emit_move_insn (r0, GEN_INT (-rounded_size));
25506 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25508 else
25509 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25510 -rounded_size)));
25513 /* Step 3: the loop
25515 do
25516 {
25517 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25518 probe at TEST_ADDR
25519 }
25520 while (TEST_ADDR != LAST_ADDR)
25522 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25523 until it is equal to ROUNDED_SIZE. */
25525 if (TARGET_64BIT)
25526 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
25527 else
25528 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
25531 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25532 that SIZE is equal to ROUNDED_SIZE. */
25534 if (size != rounded_size)
25535 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
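/* Worked example (not from the source): with PROBE_INTERVAL == 4096,
   FIRST == 4096 and SIZE == 12288, the small-count path above emits
   probes at sp-8192 and sp-12288 from the loop, then the final probe at
   sp-(FIRST+SIZE) == sp-16384.  Only when FIRST + SIZE > 32768 does the
   r12/r0 loop form get used.  */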
25539 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25540 absolute addresses. */
25542 const char *
25543 output_probe_stack_range (rtx reg1, rtx reg2)
25545 static int labelno = 0;
25546 char loop_lab[32];
25547 rtx xops[2];
25549 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25551 /* Loop. */
25552 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25554 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25555 xops[0] = reg1;
25556 xops[1] = GEN_INT (-PROBE_INTERVAL);
25557 output_asm_insn ("addi %0,%0,%1", xops);
25559 /* Probe at TEST_ADDR. */
25560 xops[1] = gen_rtx_REG (Pmode, 0);
25561 output_asm_insn ("stw %1,0(%0)", xops);
25563 /* Test if TEST_ADDR == LAST_ADDR. */
25564 xops[1] = reg2;
25565 if (TARGET_64BIT)
25566 output_asm_insn ("cmpd 0,%0,%1", xops);
25567 else
25568 output_asm_insn ("cmpw 0,%0,%1", xops);
25570 /* Branch. */
25571 fputs ("\tbne 0,", asm_out_file);
25572 assemble_name_raw (asm_out_file, loop_lab);
25573 fputc ('\n', asm_out_file);
25575 return "";
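/* Example output (editorial sketch): with the default 4096-byte interval
   and reg1 == r9, reg2 == r10, the 32-bit loop printed above is

       .LPSRL0:
               addi 9,9,-4096
               stw  0,0(9)
               cmpw 0,9,10
               bne  0,.LPSRL0

   Note the probe itself is a word store from r0 even on 64-bit; only the
   final compare switches to cmpd.  */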
25578 /* This function is called when rs6000_frame_related is processing
25579 SETs within a PARALLEL, and returns whether the REGNO save ought to
25580 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25581 for out-of-line register save functions, store multiple, and the
25582 Darwin world_save. They may contain registers that don't really
25583 need saving. */
25585 static bool
25586 interesting_frame_related_regno (unsigned int regno)
25588 /* Apparent saves of r0 are actually saving LR. It doesn't make
25589 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25590 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25591 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25592 as frame related. */
25593 if (regno == 0)
25594 return true;
25595 /* If we see CR2 then we are here on a Darwin world save. Saves of
25596 CR2 signify the whole CR is being saved. This is a long-standing
25597 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25598 that CR needs to be saved. */
25599 if (regno == CR2_REGNO)
25600 return true;
25601 /* Omit frame info for any user-defined global regs. If frame info
25602 is supplied for them, frame unwinding will restore a user reg.
25603 Also omit frame info for any reg we don't need to save, as that
25604 bloats frame info and can cause problems with shrink wrapping.
25605 Since global regs won't be seen as needing to be saved, both of
25606 these conditions are covered by save_reg_p. */
25607 return save_reg_p (regno);
25610 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25611 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25612 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25613 deduce these equivalences by itself so it wasn't necessary to hold
25614 its hand so much. Don't be tempted to always supply d2_f_d_e with
25615 the actual cfa register, i.e. r31 when we are using a hard frame
25616 pointer. That fails when saving regs off r1, and sched moves the
25617 r31 setup past the reg saves. */
25619 static rtx_insn *
25620 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25621 rtx reg2, rtx repl2)
25623 rtx repl;
25625 if (REGNO (reg) == STACK_POINTER_REGNUM)
25627 gcc_checking_assert (val == 0);
25628 repl = NULL_RTX;
25630 else
25631 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25632 GEN_INT (val));
25634 rtx pat = PATTERN (insn);
25635 if (!repl && !reg2)
25637 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25638 if (GET_CODE (pat) == PARALLEL)
25639 for (int i = 0; i < XVECLEN (pat, 0); i++)
25640 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25642 rtx set = XVECEXP (pat, 0, i);
25644 if (!REG_P (SET_SRC (set))
25645 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25646 RTX_FRAME_RELATED_P (set) = 1;
25648 RTX_FRAME_RELATED_P (insn) = 1;
25649 return insn;
25652 /* We expect that 'pat' is either a SET or a PARALLEL containing
25653 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25654 are important so they all have to be marked RTX_FRAME_RELATED_P.
25655 Call simplify_replace_rtx on the SETs rather than the whole insn
25656 so as to leave the other stuff alone (for example USE of r12). */
25658 set_used_flags (pat);
25659 if (GET_CODE (pat) == SET)
25661 if (repl)
25662 pat = simplify_replace_rtx (pat, reg, repl);
25663 if (reg2)
25664 pat = simplify_replace_rtx (pat, reg2, repl2);
25666 else if (GET_CODE (pat) == PARALLEL)
25668 pat = shallow_copy_rtx (pat);
25669 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25671 for (int i = 0; i < XVECLEN (pat, 0); i++)
25672 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25674 rtx set = XVECEXP (pat, 0, i);
25676 if (repl)
25677 set = simplify_replace_rtx (set, reg, repl);
25678 if (reg2)
25679 set = simplify_replace_rtx (set, reg2, repl2);
25680 XVECEXP (pat, 0, i) = set;
25682 if (!REG_P (SET_SRC (set))
25683 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25684 RTX_FRAME_RELATED_P (set) = 1;
25687 else
25688 gcc_unreachable ();
25690 RTX_FRAME_RELATED_P (insn) = 1;
25691 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25693 return insn;
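/* Example (hypothetical values): if INSN stores r28 through a frame
   register r11 that was set from r1 earlier, say

       (set (mem (plus (reg 11) (const_int 8))) (reg 28))

   and this is called with REG == r11, VAL == 4096, the attached
   REG_FRAME_RELATED_EXPR note reads

       (set (mem (plus (reg 1) (const_int 4104))) (reg 28))

   so dwarf2out sees a save at a known offset from the CFA even though
   the insn itself addresses via r11.  */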
25696 /* Returns a PARALLEL rtx containing a vrsave set operation with the
25697 appropriate CLOBBERs. */
25699 static rtx
25700 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25702 int nclobs, i;
25703 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25704 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25706 clobs[0]
25707 = gen_rtx_SET (vrsave,
25708 gen_rtx_UNSPEC_VOLATILE (SImode,
25709 gen_rtvec (2, reg, vrsave),
25710 UNSPECV_SET_VRSAVE));
25712 nclobs = 1;
25714 /* We need to clobber the registers in the mask so the scheduler
25715 does not move sets to VRSAVE before sets of AltiVec registers.
25717 However, if the function receives nonlocal gotos, reload will set
25718 all call saved registers live. We will end up with:
25720 (set (reg 999) (mem))
25721 (parallel [ (set (reg vrsave) (unspec blah))
25722 (clobber (reg 999))])
25724 The clobber will cause the store into reg 999 to be dead, and
25725 flow will attempt to delete an epilogue insn. In this case, we
25726 need an unspec use/set of the register. */
25728 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25729 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25731 if (!epiloguep || call_used_regs [i])
25732 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25733 gen_rtx_REG (V4SImode, i));
25734 else
25736 rtx reg = gen_rtx_REG (V4SImode, i);
25738 clobs[nclobs++]
25739 = gen_rtx_SET (reg,
25740 gen_rtx_UNSPEC (V4SImode,
25741 gen_rtvec (1, reg), 27));
25745 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25747 for (i = 0; i < nclobs; ++i)
25748 XVECEXP (insn, 0, i) = clobs[i];
25750 return insn;
25753 static rtx
25754 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25756 rtx addr, mem;
25758 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25759 mem = gen_frame_mem (GET_MODE (reg), addr);
25760 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25763 static rtx
25764 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25766 return gen_frame_set (reg, frame_reg, offset, false);
25769 static rtx
25770 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25772 return gen_frame_set (reg, frame_reg, offset, true);
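/* Usage sketch: these helpers build the SET only, so a typical caller
   wraps them in emit_insn, e.g.

       emit_insn (gen_frame_store (gen_rtx_REG (DFmode, 63),
                                   sp_reg_rtx, offset));

   which yields (set (mem/c:DF (plus (reg 1) (const_int offset)))
   (reg:DF 63)), with the frame alias set supplied by gen_frame_mem.  */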
25775 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25776 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25778 static rtx_insn *
25779 emit_frame_save (rtx frame_reg, machine_mode mode,
25780 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25782 rtx reg;
25784 /* Some cases that need register indexed addressing. */
25785 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25786 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25788 reg = gen_rtx_REG (mode, regno);
25789 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25790 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25791 NULL_RTX, NULL_RTX);
25794 /* Emit an offset memory reference suitable for a frame store, while
25795 converting to a valid addressing mode. */
25797 static rtx
25798 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25800 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25803 #ifndef TARGET_FIX_AND_CONTINUE
25804 #define TARGET_FIX_AND_CONTINUE 0
25805 #endif
25807 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
25808 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25809 #define LAST_SAVRES_REGISTER 31
25810 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25812 enum {
25813 SAVRES_LR = 0x1,
25814 SAVRES_SAVE = 0x2,
25815 SAVRES_REG = 0x0c,
25816 SAVRES_GPR = 0,
25817 SAVRES_FPR = 4,
25818 SAVRES_VR = 8
25819 };
25821 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
25823 /* Temporary holding space for an out-of-line register save/restore
25824 routine name. */
25825 static char savres_routine_name[30];
25827 /* Return the name for an out-of-line register save/restore routine.
25828 REGNO is the first register to handle; SEL selects save vs. restore
25829 and the register class (GPR, FPR, or VR). */
25830 static char *
25831 rs6000_savres_routine_name (int regno, int sel)
25833 const char *prefix = "";
25834 const char *suffix = "";
25836 /* Different targets are supposed to define
25837 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25838 routine name could be defined with:
25840 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25842 This is a nice idea in theory, but in reality things are
25843 complicated in several ways:
25845 - ELF targets have save/restore routines for GPRs.
25847 - PPC64 ELF targets have routines for save/restore of GPRs that
25848 differ in what they do with the link register, so having a set
25849 prefix doesn't work. (We only use one of the save routines at
25850 the moment, though.)
25852 - PPC32 ELF targets have "exit" versions of the restore routines
25853 that restore the link register and can save some extra space.
25854 These require an extra suffix. (There are also "tail" versions
25855 of the restore routines and "GOT" versions of the save routines,
25856 but we don't generate those at present. Same problems apply,
25857 though.)
25859 We deal with all this by synthesizing our own prefix/suffix and
25860 using that for the simple sprintf call shown above. */
25861 if (DEFAULT_ABI == ABI_V4)
25863 if (TARGET_64BIT)
25864 goto aix_names;
25866 if ((sel & SAVRES_REG) == SAVRES_GPR)
25867 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25868 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25869 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25870 else if ((sel & SAVRES_REG) == SAVRES_VR)
25871 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25872 else
25873 abort ();
25875 if ((sel & SAVRES_LR))
25876 suffix = "_x";
25878 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25880 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25881 /* No out-of-line save/restore routines for GPRs on AIX. */
25882 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25883 #endif
25885 aix_names:
25886 if ((sel & SAVRES_REG) == SAVRES_GPR)
25887 prefix = ((sel & SAVRES_SAVE)
25888 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25889 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25890 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25892 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25893 if ((sel & SAVRES_LR))
25894 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
25895 else
25896 #endif
25898 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
25899 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
25902 else if ((sel & SAVRES_REG) == SAVRES_VR)
25903 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25904 else
25905 abort ();
25908 if (DEFAULT_ABI == ABI_DARWIN)
25910 /* The Darwin approach is (slightly) different, in order to be
25911 compatible with code generated by the system toolchain. There is a
25912 single symbol for the start of the save sequence, and the code here
25913 embeds an offset into that code on the basis of the first register
25914 to be saved. */
25915 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
25916 if ((sel & SAVRES_REG) == SAVRES_GPR)
25917 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
25918 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
25919 (regno - 13) * 4, prefix, regno);
25920 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25921 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
25922 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
25923 else if ((sel & SAVRES_REG) == SAVRES_VR)
25924 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
25925 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
25926 else
25927 abort ();
25929 else
25930 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
25932 return savres_routine_name;
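/* Examples of the generated names (matching the libgcc out-of-line
   routines): 32-bit SVR4 gives "_savegpr_29" and, with SAVRES_LR set,
   the exit variant "_restgpr_29_x"; 64-bit ELF gives "_savegpr0_29"
   when SAVRES_LR is set and "_savegpr1_29" when it is not.  */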
25935 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
25936 SEL selects save vs. restore and the register class, as for
25937 rs6000_savres_routine_name. */
25938 static rtx
25939 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
25941 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
25942 ? info->first_gp_reg_save
25943 : (sel & SAVRES_REG) == SAVRES_FPR
25944 ? info->first_fp_reg_save - 32
25945 : (sel & SAVRES_REG) == SAVRES_VR
25946 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
25947 : -1);
25948 rtx sym;
25949 int select = sel;
25951 /* Don't generate bogus routine names. */
25952 gcc_assert (FIRST_SAVRES_REGISTER <= regno
25953 && regno <= LAST_SAVRES_REGISTER
25954 && select >= 0 && select <= 12);
25956 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
25958 if (sym == NULL)
25960 char *name;
25962 name = rs6000_savres_routine_name (regno, sel);
25964 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
25965 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
25966 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
25969 return sym;
25972 /* Emit a sequence of insns, including a stack tie if needed, for
25973 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
25974 reset the stack pointer, but move the base of the frame into
25975 reg UPDT_REGNO for use by out-of-line register restore routines. */
25977 static rtx
25978 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
25979 unsigned updt_regno)
25981 /* If there is nothing to do, don't do anything. */
25982 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
25983 return NULL_RTX;
25985 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
25987 /* This blockage is needed so that sched doesn't decide to move
25988 the sp change before the register restores. */
25989 if (DEFAULT_ABI == ABI_V4)
25990 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
25991 GEN_INT (frame_off)));
25993 /* If we are restoring registers out-of-line, we will be using the
25994 "exit" variants of the restore routines, which will reset the
25995 stack for us. But we do need to point updt_reg into the
25996 right place for those routines. */
25997 if (frame_off != 0)
25998 return emit_insn (gen_add3_insn (updt_reg_rtx,
25999 frame_reg_rtx, GEN_INT (frame_off)));
26000 else
26001 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26003 return NULL_RTX;
26006 /* Return the register number used as a pointer by out-of-line
26007 save/restore functions. */
26009 static inline unsigned
26010 ptr_regno_for_savres (int sel)
26012 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26013 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26014 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
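/* Summary of the above, derived from the code, for quick reference:

       AIX/ELFv2:  FPR routines or any SAVRES_LR variant -> r1, else r12
       Darwin:     FPR routines -> r1, everything else   -> r11
       other V4:   r11

   i.e. on AIX/ELFv2 only the GPR routines that leave LR alone need a
   pointer register other than r1.  */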
26017 /* Construct a parallel rtx describing the effect of a call to an
26018 out-of-line register save/restore routine, and emit the insn
26019 or jump_insn as appropriate. */
26021 static rtx_insn *
26022 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26023 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26024 machine_mode reg_mode, int sel)
26026 int i;
26027 int offset, start_reg, end_reg, n_regs, use_reg;
26028 int reg_size = GET_MODE_SIZE (reg_mode);
26029 rtx sym;
26030 rtvec p;
26031 rtx par;
26032 rtx_insn *insn;
26034 offset = 0;
26035 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26036 ? info->first_gp_reg_save
26037 : (sel & SAVRES_REG) == SAVRES_FPR
26038 ? info->first_fp_reg_save
26039 : (sel & SAVRES_REG) == SAVRES_VR
26040 ? info->first_altivec_reg_save
26041 : -1);
26042 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26043 ? 32
26044 : (sel & SAVRES_REG) == SAVRES_FPR
26045 ? 64
26046 : (sel & SAVRES_REG) == SAVRES_VR
26047 ? LAST_ALTIVEC_REGNO + 1
26048 : -1);
26049 n_regs = end_reg - start_reg;
26050 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26051 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26052 + n_regs);
26054 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26055 RTVEC_ELT (p, offset++) = ret_rtx;
26057 RTVEC_ELT (p, offset++)
26058 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26060 sym = rs6000_savres_routine_sym (info, sel);
26061 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26063 use_reg = ptr_regno_for_savres (sel);
26064 if ((sel & SAVRES_REG) == SAVRES_VR)
26066 /* Vector regs are saved/restored using [reg+reg] addressing. */
26067 RTVEC_ELT (p, offset++)
26068 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26069 RTVEC_ELT (p, offset++)
26070 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26072 else
26073 RTVEC_ELT (p, offset++)
26074 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26076 for (i = 0; i < end_reg - start_reg; i++)
26077 RTVEC_ELT (p, i + offset)
26078 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26079 frame_reg_rtx, save_area_offset + reg_size * i,
26080 (sel & SAVRES_SAVE) != 0);
26082 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26083 RTVEC_ELT (p, i + offset)
26084 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26086 par = gen_rtx_PARALLEL (VOIDmode, p);
26088 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26090 insn = emit_jump_insn (par);
26091 JUMP_LABEL (insn) = ret_rtx;
26093 else
26094 insn = emit_insn (par);
26095 return insn;
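/* Shape of the PARALLEL built above, for a GPR save with SAVRES_LR
   (a sketch; LR_REGNO is 65 in this port and the offsets are symbolic):

       (parallel [(clobber (reg:P 65))
                  (use (symbol_ref "_savegpr0_29"))
                  (use (reg:P 1))
                  (set (mem (plus (reg 1) off29)) (reg 29))
                  ...
                  (set (mem (plus (reg 1) lr_off)) (reg 0))])

   The restore variants with SAVRES_LR instead begin with a return rtx
   and are emitted as a jump_insn.  */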
26098 /* Emit prologue code to store CR fields that need to be saved into REG. This
26099 function should only be called when moving the non-volatile CRs to REG;
26100 it is not a general-purpose routine to move the entire set of CRs to REG.
26101 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26102 volatile CRs. */
26104 static void
26105 rs6000_emit_prologue_move_from_cr (rtx reg)
26107 /* Only the ELFv2 ABI allows storing only selected fields. */
26108 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26110 int i, cr_reg[8], count = 0;
26112 /* Collect CR fields that must be saved. */
26113 for (i = 0; i < 8; i++)
26114 if (save_reg_p (CR0_REGNO + i))
26115 cr_reg[count++] = i;
26117 /* If it's just a single one, use mfcrf. */
26118 if (count == 1)
26120 rtvec p = rtvec_alloc (1);
26121 rtvec r = rtvec_alloc (2);
26122 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26123 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26124 RTVEC_ELT (p, 0)
26125 = gen_rtx_SET (reg,
26126 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26128 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26129 return;
26132 /* ??? It might be better to handle the count == 2 and count == 3 cases
26133 here as well, using logical operations to combine the values. */
26136 emit_insn (gen_prologue_movesi_from_cr (reg));
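/* For example (editorial): if only CR2 must be saved, the single-field
   path above emits the equivalent of

       mfocrf r,0x20        ; 1 << (7 - 2)

   leaving the other CR fields undefined in r, which is fine because only
   the saved fields are described in the CFI.  Without ELFv2/mfcrf we
   fall back to a full "mfcr r".  */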
26139 /* Return whether the split-stack arg pointer (r12) is used. */
26141 static bool
26142 split_stack_arg_pointer_used_p (void)
26144 /* If the pseudo holding the arg pointer is no longer a pseudo,
26145 then the arg pointer is used. */
26146 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26147 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26148 || (REGNO (cfun->machine->split_stack_arg_pointer)
26149 < FIRST_PSEUDO_REGISTER)))
26150 return true;
26152 /* Unfortunately we also need to do some code scanning, since
26153 r12 may have been substituted for the pseudo. */
26154 rtx_insn *insn;
26155 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26156 FOR_BB_INSNS (bb, insn)
26157 if (NONDEBUG_INSN_P (insn))
26159 /* A call destroys r12. */
26160 if (CALL_P (insn))
26161 return false;
26163 df_ref use;
26164 FOR_EACH_INSN_USE (use, insn)
26166 rtx x = DF_REF_REG (use);
26167 if (REG_P (x) && REGNO (x) == 12)
26168 return true;
26170 df_ref def;
26171 FOR_EACH_INSN_DEF (def, insn)
26173 rtx x = DF_REF_REG (def);
26174 if (REG_P (x) && REGNO (x) == 12)
26175 return false;
26178 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26181 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26183 static bool
26184 rs6000_global_entry_point_needed_p (void)
26186 /* Only needed for the ELFv2 ABI. */
26187 if (DEFAULT_ABI != ABI_ELFv2)
26188 return false;
26190 /* With -msingle-pic-base, we assume the whole program shares the same
26191 TOC, so no global entry point prologues are needed anywhere. */
26192 if (TARGET_SINGLE_PIC_BASE)
26193 return false;
26195 /* Ensure we have a global entry point for thunks. ??? We could
26196 avoid that if the target routine doesn't need a global entry point,
26197 but we do not know whether this is the case at this point. */
26198 if (cfun->is_thunk)
26199 return true;
26201 /* For regular functions, rs6000_emit_prologue sets this flag if the
26202 routine ever uses the TOC pointer. */
26203 return cfun->machine->r2_setup_needed;
26206 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26207 static sbitmap
26208 rs6000_get_separate_components (void)
26210 rs6000_stack_t *info = rs6000_stack_info ();
26212 if (WORLD_SAVE_P (info))
26213 return NULL;
26215 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26216 && !(info->savres_strategy & REST_MULTIPLE));
26218 /* Component 0 is the save/restore of LR (done via GPR0).
26219 Components 13..31 are the save/restore of GPR13..GPR31.
26220 Components 46..63 are the save/restore of FPR14..FPR31. */
26222 cfun->machine->n_components = 64;
26224 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26225 bitmap_clear (components);
26227 int reg_size = TARGET_32BIT ? 4 : 8;
26228 int fp_reg_size = 8;
26230 /* The GPRs we need saved to the frame. */
26231 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26232 && (info->savres_strategy & REST_INLINE_GPRS))
26234 int offset = info->gp_save_offset;
26235 if (info->push_p)
26236 offset += info->total_size;
26238 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26240 if (IN_RANGE (offset, -0x8000, 0x7fff)
26241 && save_reg_p (regno))
26242 bitmap_set_bit (components, regno);
26244 offset += reg_size;
26248 /* Don't mess with the hard frame pointer. */
26249 if (frame_pointer_needed)
26250 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26252 /* Don't mess with the fixed TOC register. */
26253 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26254 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26255 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26256 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26258 /* The FPRs we need saved to the frame. */
26259 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26260 && (info->savres_strategy & REST_INLINE_FPRS))
26262 int offset = info->fp_save_offset;
26263 if (info->push_p)
26264 offset += info->total_size;
26266 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26268 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26269 bitmap_set_bit (components, regno);
26271 offset += fp_reg_size;
26275 /* Optimize LR save and restore if we can. This is component 0. Any
26276 out-of-line register save/restore routines need LR. */
26277 if (info->lr_save_p
26278 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26279 && (info->savres_strategy & SAVE_INLINE_GPRS)
26280 && (info->savres_strategy & REST_INLINE_GPRS)
26281 && (info->savres_strategy & SAVE_INLINE_FPRS)
26282 && (info->savres_strategy & REST_INLINE_FPRS)
26283 && (info->savres_strategy & SAVE_INLINE_VRS)
26284 && (info->savres_strategy & REST_INLINE_VRS))
26286 int offset = info->lr_save_offset;
26287 if (info->push_p)
26288 offset += info->total_size;
26289 if (IN_RANGE (offset, -0x8000, 0x7fff))
26290 bitmap_set_bit (components, 0);
26293 return components;
26296 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26297 static sbitmap
26298 rs6000_components_for_bb (basic_block bb)
26300 rs6000_stack_t *info = rs6000_stack_info ();
26302 bitmap in = DF_LIVE_IN (bb);
26303 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26304 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26306 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26307 bitmap_clear (components);
26309 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26311 /* GPRs. */
26312 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26313 if (bitmap_bit_p (in, regno)
26314 || bitmap_bit_p (gen, regno)
26315 || bitmap_bit_p (kill, regno))
26316 bitmap_set_bit (components, regno);
26318 /* FPRs. */
26319 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26320 if (bitmap_bit_p (in, regno)
26321 || bitmap_bit_p (gen, regno)
26322 || bitmap_bit_p (kill, regno))
26323 bitmap_set_bit (components, regno);
26325 /* The link register. */
26326 if (bitmap_bit_p (in, LR_REGNO)
26327 || bitmap_bit_p (gen, LR_REGNO)
26328 || bitmap_bit_p (kill, LR_REGNO))
26329 bitmap_set_bit (components, 0);
26331 return components;
26334 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26335 static void
26336 rs6000_disqualify_components (sbitmap components, edge e,
26337 sbitmap edge_components, bool /*is_prologue*/)
26339 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26340 live where we want to place that code. */
26341 if (bitmap_bit_p (edge_components, 0)
26342 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26344 if (dump_file)
26345 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26346 "on entry to bb %d\n", e->dest->index);
26347 bitmap_clear_bit (components, 0);
26351 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26352 static void
26353 rs6000_emit_prologue_components (sbitmap components)
26355 rs6000_stack_t *info = rs6000_stack_info ();
26356 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26357 ? HARD_FRAME_POINTER_REGNUM
26358 : STACK_POINTER_REGNUM);
26360 machine_mode reg_mode = Pmode;
26361 int reg_size = TARGET_32BIT ? 4 : 8;
26362 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26363 ? DFmode : SFmode;
26364 int fp_reg_size = 8;
26366 /* Prologue for LR. */
26367 if (bitmap_bit_p (components, 0))
26369 rtx reg = gen_rtx_REG (reg_mode, 0);
26370 rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
26371 RTX_FRAME_RELATED_P (insn) = 1;
26372 add_reg_note (insn, REG_CFA_REGISTER, NULL);
26374 int offset = info->lr_save_offset;
26375 if (info->push_p)
26376 offset += info->total_size;
26378 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26379 RTX_FRAME_RELATED_P (insn) = 1;
26380 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26381 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26382 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26385 /* Prologue for the GPRs. */
26386 int offset = info->gp_save_offset;
26387 if (info->push_p)
26388 offset += info->total_size;
26390 for (int i = info->first_gp_reg_save; i < 32; i++)
26392 if (bitmap_bit_p (components, i))
26394 rtx reg = gen_rtx_REG (reg_mode, i);
26395 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26396 RTX_FRAME_RELATED_P (insn) = 1;
26397 rtx set = copy_rtx (single_set (insn));
26398 add_reg_note (insn, REG_CFA_OFFSET, set);
26401 offset += reg_size;
26404 /* Prologue for the FPRs. */
26405 offset = info->fp_save_offset;
26406 if (info->push_p)
26407 offset += info->total_size;
26409 for (int i = info->first_fp_reg_save; i < 64; i++)
26411 if (bitmap_bit_p (components, i))
26413 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26414 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26415 RTX_FRAME_RELATED_P (insn) = 1;
26416 rtx set = copy_rtx (single_set (insn));
26417 add_reg_note (insn, REG_CFA_OFFSET, set);
26420 offset += fp_reg_size;
26424 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26425 static void
26426 rs6000_emit_epilogue_components (sbitmap components)
26428 rs6000_stack_t *info = rs6000_stack_info ();
26429 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26430 ? HARD_FRAME_POINTER_REGNUM
26431 : STACK_POINTER_REGNUM);
26433 machine_mode reg_mode = Pmode;
26434 int reg_size = TARGET_32BIT ? 4 : 8;
26436 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26437 ? DFmode : SFmode;
26438 int fp_reg_size = 8;
26440 /* Epilogue for the FPRs. */
26441 int offset = info->fp_save_offset;
26442 if (info->push_p)
26443 offset += info->total_size;
26445 for (int i = info->first_fp_reg_save; i < 64; i++)
26447 if (bitmap_bit_p (components, i))
26449 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26450 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26451 RTX_FRAME_RELATED_P (insn) = 1;
26452 add_reg_note (insn, REG_CFA_RESTORE, reg);
26455 offset += fp_reg_size;
26458 /* Epilogue for the GPRs. */
26459 offset = info->gp_save_offset;
26460 if (info->push_p)
26461 offset += info->total_size;
26463 for (int i = info->first_gp_reg_save; i < 32; i++)
26465 if (bitmap_bit_p (components, i))
26467 rtx reg = gen_rtx_REG (reg_mode, i);
26468 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26469 RTX_FRAME_RELATED_P (insn) = 1;
26470 add_reg_note (insn, REG_CFA_RESTORE, reg);
26473 offset += reg_size;
26476 /* Epilogue for LR. */
26477 if (bitmap_bit_p (components, 0))
26479 int offset = info->lr_save_offset;
26480 if (info->push_p)
26481 offset += info->total_size;
26483 rtx reg = gen_rtx_REG (reg_mode, 0);
26484 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26486 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26487 insn = emit_move_insn (lr, reg);
26488 RTX_FRAME_RELATED_P (insn) = 1;
26489 add_reg_note (insn, REG_CFA_RESTORE, lr);
26493 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26494 static void
26495 rs6000_set_handled_components (sbitmap components)
26497 rs6000_stack_t *info = rs6000_stack_info ();
26499 for (int i = info->first_gp_reg_save; i < 32; i++)
26500 if (bitmap_bit_p (components, i))
26501 cfun->machine->gpr_is_wrapped_separately[i] = true;
26503 for (int i = info->first_fp_reg_save; i < 64; i++)
26504 if (bitmap_bit_p (components, i))
26505 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26507 if (bitmap_bit_p (components, 0))
26508 cfun->machine->lr_is_wrapped_separately = true;
26511 /* VRSAVE is a bit vector representing which AltiVec registers
26512 are used. The OS uses this to determine which vector
26513 registers to save on a context switch. We need to save
26514 VRSAVE on the stack frame, add whatever AltiVec registers we
26515 used in this function, and do the corresponding magic in the
26516 epilogue. */
26517 static void
26518 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26519 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26521 /* Get VRSAVE into a GPR. */
26522 rtx reg = gen_rtx_REG (SImode, save_regno);
26523 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26524 if (TARGET_MACHO)
26525 emit_insn (gen_get_vrsave_internal (reg));
26526 else
26527 emit_insn (gen_rtx_SET (reg, vrsave));
26529 /* Save VRSAVE. */
26530 int offset = info->vrsave_save_offset + frame_off;
26531 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26533 /* Include the registers in the mask. */
26534 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26536 emit_insn (generate_set_vrsave (reg, info, 0));
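/* Assembly sketch (illustrative, register numbers arbitrary): the
   sequence built above is essentially

       mfvrsave 11           # mfspr 11,256
       stw      11,off(1)    # save the incoming VRSAVE value
       oris/ori 11,11,mask   # add this function's vector regs
       mtvrsave 11           # mtspr 256,11

   with the final mtvrsave wrapped in the PARALLEL from
   generate_set_vrsave so AltiVec register saves are not scheduled
   across it.  */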
26539 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26540 called, it left the arg pointer to the old stack in r29. Otherwise, the
26541 arg pointer is the top of the current frame. */
26542 static void
26543 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26544 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26546 cfun->machine->split_stack_argp_used = true;
26548 if (sp_adjust)
26550 rtx r12 = gen_rtx_REG (Pmode, 12);
26551 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26552 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26553 emit_insn_before (set_r12, sp_adjust);
26555 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26557 rtx r12 = gen_rtx_REG (Pmode, 12);
26558 if (frame_off == 0)
26559 emit_move_insn (r12, frame_reg_rtx);
26560 else
26561 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26564 if (info->push_p)
26566 rtx r12 = gen_rtx_REG (Pmode, 12);
26567 rtx r29 = gen_rtx_REG (Pmode, 29);
26568 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26569 rtx not_more = gen_label_rtx ();
26570 rtx jump;
26572 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26573 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26574 gen_rtx_LABEL_REF (VOIDmode, not_more),
26575 pc_rtx);
26576 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26577 JUMP_LABEL (jump) = not_more;
26578 LABEL_NUSES (not_more) += 1;
26579 emit_move_insn (r12, r29);
26580 emit_label (not_more);
26584 /* Emit function prologue as insns. */
26586 void
26587 rs6000_emit_prologue (void)
26589 rs6000_stack_t *info = rs6000_stack_info ();
26590 machine_mode reg_mode = Pmode;
26591 int reg_size = TARGET_32BIT ? 4 : 8;
26592 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26593 ? DFmode : SFmode;
26594 int fp_reg_size = 8;
26595 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26596 rtx frame_reg_rtx = sp_reg_rtx;
26597 unsigned int cr_save_regno;
26598 rtx cr_save_rtx = NULL_RTX;
26599 rtx_insn *insn;
26600 int strategy;
26601 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26602 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26603 && call_used_regs[STATIC_CHAIN_REGNUM]);
26604 int using_split_stack = (flag_split_stack
26605 && (lookup_attribute ("no_split_stack",
26606 DECL_ATTRIBUTES (cfun->decl))
26607 == NULL));
26609 /* Offset to top of frame for frame_reg and sp respectively. */
26610 HOST_WIDE_INT frame_off = 0;
26611 HOST_WIDE_INT sp_off = 0;
26612 /* sp_adjust is the stack adjusting instruction, tracked so that the
26613 insn setting up the split-stack arg pointer can be emitted just
26614 prior to it, when r12 is not used here for other purposes. */
26615 rtx_insn *sp_adjust = 0;
26617 #if CHECKING_P
26618 /* Track and check usage of r0, r11, r12. */
26619 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26620 #define START_USE(R) do \
26621 { \
26622 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26623 reg_inuse |= 1 << (R); \
26624 } while (0)
26625 #define END_USE(R) do \
26626 { \
26627 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26628 reg_inuse &= ~(1 << (R)); \
26629 } while (0)
26630 #define NOT_INUSE(R) do \
26631 { \
26632 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26633 } while (0)
26634 #else
26635 #define START_USE(R) do {} while (0)
26636 #define END_USE(R) do {} while (0)
26637 #define NOT_INUSE(R) do {} while (0)
26638 #endif
26640 if (DEFAULT_ABI == ABI_ELFv2
26641 && !TARGET_SINGLE_PIC_BASE)
26643 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26645 /* With -mminimal-toc we may generate an extra use of r2 below. */
26646 if (TARGET_TOC && TARGET_MINIMAL_TOC
26647 && !constant_pool_empty_p ())
26648 cfun->machine->r2_setup_needed = true;
26652 if (flag_stack_usage_info)
26653 current_function_static_stack_size = info->total_size;
26655 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26657 HOST_WIDE_INT size = info->total_size;
26659 if (crtl->is_leaf && !cfun->calls_alloca)
26661 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
26662 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
26663 size - STACK_CHECK_PROTECT);
26665 else if (size > 0)
26666 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
26669 if (TARGET_FIX_AND_CONTINUE)
26671 /* gdb on darwin arranges to forward a function from the old
26672 address by modifying the first 5 instructions of the function
26673 to branch to the overriding function. This is necessary to
26674 permit function pointers that point to the old function to
26675 actually forward to the new function. */
26676 emit_insn (gen_nop ());
26677 emit_insn (gen_nop ());
26678 emit_insn (gen_nop ());
26679 emit_insn (gen_nop ());
26680 emit_insn (gen_nop ());
26683 /* Handle world saves specially here. */
26684 if (WORLD_SAVE_P (info))
26686 int i, j, sz;
26687 rtx treg;
26688 rtvec p;
26689 rtx reg0;
26691 /* save_world expects lr in r0. */
26692 reg0 = gen_rtx_REG (Pmode, 0);
26693 if (info->lr_save_p)
26695 insn = emit_move_insn (reg0,
26696 gen_rtx_REG (Pmode, LR_REGNO));
26697 RTX_FRAME_RELATED_P (insn) = 1;
26700 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26701 assumptions about the offsets of various bits of the stack
26702 frame. */
26703 gcc_assert (info->gp_save_offset == -220
26704 && info->fp_save_offset == -144
26705 && info->lr_save_offset == 8
26706 && info->cr_save_offset == 4
26707 && info->push_p
26708 && info->lr_save_p
26709 && (!crtl->calls_eh_return
26710 || info->ehrd_offset == -432)
26711 && info->vrsave_save_offset == -224
26712 && info->altivec_save_offset == -416);
26714 treg = gen_rtx_REG (SImode, 11);
26715 emit_move_insn (treg, GEN_INT (-info->total_size));
26717 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26718 in R11. It also clobbers R12, so beware! */
26720 /* Preserve CR2 for save_world prologues */
26721 sz = 5;
26722 sz += 32 - info->first_gp_reg_save;
26723 sz += 64 - info->first_fp_reg_save;
26724 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26725 p = rtvec_alloc (sz);
26726 j = 0;
26727 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26728 gen_rtx_REG (SImode,
26729 LR_REGNO));
26730 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26731 gen_rtx_SYMBOL_REF (Pmode,
26732 "*save_world"));
26733 /* We do floats first so that the instruction pattern matches
26734 properly. */
26735 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26736 RTVEC_ELT (p, j++)
26737 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
26738 ? DFmode : SFmode,
26739 info->first_fp_reg_save + i),
26740 frame_reg_rtx,
26741 info->fp_save_offset + frame_off + 8 * i);
26742 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26743 RTVEC_ELT (p, j++)
26744 = gen_frame_store (gen_rtx_REG (V4SImode,
26745 info->first_altivec_reg_save + i),
26746 frame_reg_rtx,
26747 info->altivec_save_offset + frame_off + 16 * i);
26748 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26749 RTVEC_ELT (p, j++)
26750 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26751 frame_reg_rtx,
26752 info->gp_save_offset + frame_off + reg_size * i);
26754 /* CR register traditionally saved as CR2. */
26755 RTVEC_ELT (p, j++)
26756 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26757 frame_reg_rtx, info->cr_save_offset + frame_off);
26758 /* Explain about use of R0. */
26759 if (info->lr_save_p)
26760 RTVEC_ELT (p, j++)
26761 = gen_frame_store (reg0,
26762 frame_reg_rtx, info->lr_save_offset + frame_off);
26763 /* Explain what happens to the stack pointer. */
26765 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26766 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26769 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26770 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26771 treg, GEN_INT (-info->total_size));
26772 sp_off = frame_off = info->total_size;
26775 strategy = info->savres_strategy;
26777 /* For V.4, update stack before we do any saving and set back pointer. */
26778 if (! WORLD_SAVE_P (info)
26779 && info->push_p
26780 && (DEFAULT_ABI == ABI_V4
26781 || crtl->calls_eh_return))
26783 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26784 || !(strategy & SAVE_INLINE_GPRS)
26785 || !(strategy & SAVE_INLINE_VRS));
26786 int ptr_regno = -1;
26787 rtx ptr_reg = NULL_RTX;
26788 int ptr_off = 0;
26790 if (info->total_size < 32767)
26791 frame_off = info->total_size;
26792 else if (need_r11)
26793 ptr_regno = 11;
26794 else if (info->cr_save_p
26795 || info->lr_save_p
26796 || info->first_fp_reg_save < 64
26797 || info->first_gp_reg_save < 32
26798 || info->altivec_size != 0
26799 || info->vrsave_size != 0
26800 || crtl->calls_eh_return)
26801 ptr_regno = 12;
26802 else
26804 /* The prologue won't be saving any regs so there is no need
26805 to set up a frame register to access any frame save area.
26806 We also won't be using frame_off anywhere below, but set
26807 the correct value anyway to protect against future
26808 changes to this function. */
26809 frame_off = info->total_size;
26811 if (ptr_regno != -1)
26813 /* Set up the frame offset to that needed by the first
26814 out-of-line save function. */
26815 START_USE (ptr_regno);
26816 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26817 frame_reg_rtx = ptr_reg;
26818 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26819 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26820 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26821 ptr_off = info->gp_save_offset + info->gp_size;
26822 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26823 ptr_off = info->altivec_save_offset + info->altivec_size;
26824 frame_off = -ptr_off;
26826 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26827 ptr_reg, ptr_off);
26828 if (REGNO (frame_reg_rtx) == 12)
26829 sp_adjust = 0;
26830 sp_off = info->total_size;
26831 if (frame_reg_rtx != sp_reg_rtx)
26832 rs6000_emit_stack_tie (frame_reg_rtx, false);
26835 /* If we use the link register, get it into r0. */
26836 if (!WORLD_SAVE_P (info) && info->lr_save_p
26837 && !cfun->machine->lr_is_wrapped_separately)
26839 rtx addr, reg, mem;
26841 reg = gen_rtx_REG (Pmode, 0);
26842 START_USE (0);
26843 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26844 RTX_FRAME_RELATED_P (insn) = 1;
26846 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26847 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26849 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26850 GEN_INT (info->lr_save_offset + frame_off));
26851 mem = gen_rtx_MEM (Pmode, addr);
26852 /* This should not be of rs6000_sr_alias_set, because of
26853 __builtin_return_address. */
26855 insn = emit_move_insn (mem, reg);
26856 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26857 NULL_RTX, NULL_RTX);
26858 END_USE (0);
26862 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26863 r12 will be needed by the out-of-line gpr save. */
26864 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26865 && !(strategy & (SAVE_INLINE_GPRS
26866 | SAVE_NOINLINE_GPRS_SAVES_LR))
26867 ? 11 : 12);
26868 if (!WORLD_SAVE_P (info)
26869 && info->cr_save_p
26870 && REGNO (frame_reg_rtx) != cr_save_regno
26871 && !(using_static_chain_p && cr_save_regno == 11)
26872 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26874 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26875 START_USE (cr_save_regno);
26876 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26879 /* Do any required saving of fpr's. If only one or two to save, do
26880 it ourselves. Otherwise, call an out-of-line save function. */
26881 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26883 int offset = info->fp_save_offset + frame_off;
26884 for (int i = info->first_fp_reg_save; i < 64; i++)
26886 if (save_reg_p (i)
26887 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
26888 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
26889 sp_off - frame_off);
26891 offset += fp_reg_size;
26894 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26896 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26897 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26898 unsigned ptr_regno = ptr_regno_for_savres (sel);
26899 rtx ptr_reg = frame_reg_rtx;
26901 if (REGNO (frame_reg_rtx) == ptr_regno)
26902 gcc_checking_assert (frame_off == 0);
26903 else
26905 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26906 NOT_INUSE (ptr_regno);
26907 emit_insn (gen_add3_insn (ptr_reg,
26908 frame_reg_rtx, GEN_INT (frame_off)));
26910 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26911 info->fp_save_offset,
26912 info->lr_save_offset,
26913 DFmode, sel);
26914 rs6000_frame_related (insn, ptr_reg, sp_off,
26915 NULL_RTX, NULL_RTX);
26916 if (lr)
26917 END_USE (0);
26920 /* Save GPRs. This is done as a PARALLEL if we are using
26921 the store-multiple instructions. */
26922 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26924 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26925 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26926 unsigned ptr_regno = ptr_regno_for_savres (sel);
26927 rtx ptr_reg = frame_reg_rtx;
26928 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
26929 int end_save = info->gp_save_offset + info->gp_size;
26930 int ptr_off;
26932 if (ptr_regno == 12)
26933 sp_adjust = 0;
26934 if (!ptr_set_up)
26935 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26937 /* Need to adjust r11 (r12) if we saved any FPRs. */
26938 if (end_save + frame_off != 0)
26940 rtx offset = GEN_INT (end_save + frame_off);
26942 if (ptr_set_up)
26943 frame_off = -end_save;
26944 else
26945 NOT_INUSE (ptr_regno);
26946 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26948 else if (!ptr_set_up)
26950 NOT_INUSE (ptr_regno);
26951 emit_move_insn (ptr_reg, frame_reg_rtx);
26953 ptr_off = -end_save;
26954 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26955 info->gp_save_offset + ptr_off,
26956 info->lr_save_offset + ptr_off,
26957 reg_mode, sel);
26958 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
26959 NULL_RTX, NULL_RTX);
26960 if (lr)
26961 END_USE (0);
26963 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
26965 rtvec p;
26966 int i;
26967 p = rtvec_alloc (32 - info->first_gp_reg_save);
26968 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26969 RTVEC_ELT (p, i)
26970 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26971 frame_reg_rtx,
26972 info->gp_save_offset + frame_off + reg_size * i);
26973 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26974 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26975 NULL_RTX, NULL_RTX);
26977 else if (!WORLD_SAVE_P (info))
26979 int offset = info->gp_save_offset + frame_off;
26980 for (int i = info->first_gp_reg_save; i < 32; i++)
26982 if (save_reg_p (i)
26983 && !cfun->machine->gpr_is_wrapped_separately[i])
26984 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
26985 sp_off - frame_off);
26987 offset += reg_size;
26991 if (crtl->calls_eh_return)
26993 unsigned int i;
26994 rtvec p;
26996 for (i = 0; ; ++i)
26998 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26999 if (regno == INVALID_REGNUM)
27000 break;
27003 p = rtvec_alloc (i);
27005 for (i = 0; ; ++i)
27007 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27008 if (regno == INVALID_REGNUM)
27009 break;
27011 rtx set
27012 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27013 sp_reg_rtx,
27014 info->ehrd_offset + sp_off + reg_size * (int) i);
27015 RTVEC_ELT (p, i) = set;
27016 RTX_FRAME_RELATED_P (set) = 1;
27019 insn = emit_insn (gen_blockage ());
27020 RTX_FRAME_RELATED_P (insn) = 1;
27021 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27024 /* In AIX ABI we need to make sure r2 is really saved. */
27025 if (TARGET_AIX && crtl->calls_eh_return)
27027 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27028 rtx join_insn, note;
27029 rtx_insn *save_insn;
27030 long toc_restore_insn;
27032 tmp_reg = gen_rtx_REG (Pmode, 11);
27033 tmp_reg_si = gen_rtx_REG (SImode, 11);
27034 if (using_static_chain_p)
27036 START_USE (0);
27037 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27039 else
27040 START_USE (11);
27041 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27042 /* Peek at the instruction to which this function returns. If it's
27043 restoring r2, then we know we've already saved r2. We can't
27044 unconditionally save r2 because the value we have will already
27045 be updated if we arrived at this function via a plt call or
27046 toc adjusting stub. */
27047 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27048 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27049 + RS6000_TOC_SAVE_SLOT);
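/* The magic constants decode as instruction images of the TOC restore:
   0xE8410000 is "ld r2,0(r1)" (primary opcode 58, RT=2, RA=1) and
   0x80410000 is "lwz r2,0(r1)"; adding RS6000_TOC_SAVE_SLOT gives the
   exact "ld r2,SLOT(r1)" that a TOC-adjusting call sequence places
   after the bl, which is what the code below compares against.  */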
27050 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27051 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27052 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27053 validate_condition_mode (EQ, CCUNSmode);
27054 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27055 emit_insn (gen_rtx_SET (compare_result,
27056 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27057 toc_save_done = gen_label_rtx ();
27058 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27059 gen_rtx_EQ (VOIDmode, compare_result,
27060 const0_rtx),
27061 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27062 pc_rtx);
27063 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27064 JUMP_LABEL (jump) = toc_save_done;
27065 LABEL_NUSES (toc_save_done) += 1;
27067 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27068 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27069 sp_off - frame_off);
27071 emit_label (toc_save_done);
27073 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
27074 have a CFG that has different saves along different paths.
27075 Move the note to a dummy blockage insn, which describes that
27076 R2 is unconditionally saved after the label. */
27077 /* ??? An alternate representation might be a special insn pattern
27078 containing both the branch and the store. That might let the
27079 code that minimizes the number of DW_CFA_advance opcodes more
27080 freedom in placing the annotations. */
27081 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27082 if (note)
27083 remove_note (save_insn, note);
27084 else
27085 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27086 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27087 RTX_FRAME_RELATED_P (save_insn) = 0;
27089 join_insn = emit_insn (gen_blockage ());
27090 REG_NOTES (join_insn) = note;
27091 RTX_FRAME_RELATED_P (join_insn) = 1;
27093 if (using_static_chain_p)
27095 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27096 END_USE (0);
27098 else
27099 END_USE (11);
27102 /* Save CR if we use any that must be preserved. */
27103 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27105 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27106 GEN_INT (info->cr_save_offset + frame_off));
27107 rtx mem = gen_frame_mem (SImode, addr);
27109 /* If we didn't copy cr before, do so now using r0. */
27110 if (cr_save_rtx == NULL_RTX)
27112 START_USE (0);
27113 cr_save_rtx = gen_rtx_REG (SImode, 0);
27114 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27117 /* Saving CR requires a two-instruction sequence: one instruction
27118 to move the CR to a general-purpose register, and a second
27119 instruction that stores the GPR to memory.
27121 We do not emit any DWARF CFI records for the first of these,
27122 because we cannot properly represent the fact that CR is saved in
27123 a register. One reason is that we cannot express that multiple
27124 CR fields are saved; another reason is that on 64-bit, the size
27125 of the CR register in DWARF (4 bytes) differs from the size of
27126 a general-purpose register.
27128 This means if any intervening instruction were to clobber one of
27129 the call-saved CR fields, we'd have incorrect CFI. To prevent
27130 this from happening, we mark the store to memory as a use of
27131 those CR fields, which prevents any such instruction from being
27132 scheduled in between the two instructions. */
27133 rtx crsave_v[9];
27134 int n_crsave = 0;
27135 int i;
27137 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27138 for (i = 0; i < 8; i++)
27139 if (save_reg_p (CR0_REGNO + i))
27140 crsave_v[n_crsave++]
27141 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27143 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27144 gen_rtvec_v (n_crsave, crsave_v)));
27145 END_USE (REGNO (cr_save_rtx));
27147 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27148 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27149 so we need to construct a frame expression manually. */
27150 RTX_FRAME_RELATED_P (insn) = 1;
27152 /* Update address to be stack-pointer relative, like
27153 rs6000_frame_related would do. */
27154 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27155 GEN_INT (info->cr_save_offset + sp_off));
27156 mem = gen_frame_mem (SImode, addr);
27158 if (DEFAULT_ABI == ABI_ELFv2)
27160 /* In the ELFv2 ABI we generate separate CFI records for each
27161 CR field that was actually saved. They all point to the
27162 same 32-bit stack slot. */
27163 rtx crframe[8];
27164 int n_crframe = 0;
27166 for (i = 0; i < 8; i++)
27167 if (save_reg_p (CR0_REGNO + i))
27169 crframe[n_crframe]
27170 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27172 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27173 n_crframe++;
27176 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27177 gen_rtx_PARALLEL (VOIDmode,
27178 gen_rtvec_v (n_crframe, crframe)));
27180 else
27182 /* In other ABIs, by convention, we use a single CR regnum to
27183 represent the fact that all call-saved CR fields are saved.
27184 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27185 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27186 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27190 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27191 *separate* slots if the routine calls __builtin_eh_return, so
27192 that they can be independently restored by the unwinder. */
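/* For example, with the usual call-saved fields CR2, CR3 and CR4 this
   emits three stores, at ehcr_offset, ehcr_offset + 8 and
   ehcr_offset + 16 (the slots are reg_size bytes apart, and ELFv2 is
   64-bit only).  */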
27193 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27195 int i, cr_off = info->ehcr_offset;
27196 rtx crsave;
27198 /* ??? We might get better performance by using multiple mfocrf
27199 instructions. */
27200 crsave = gen_rtx_REG (SImode, 0);
27201 emit_insn (gen_prologue_movesi_from_cr (crsave));
27203 for (i = 0; i < 8; i++)
27204 if (!call_used_regs[CR0_REGNO + i])
27206 rtvec p = rtvec_alloc (2);
27207 RTVEC_ELT (p, 0)
27208 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27209 RTVEC_ELT (p, 1)
27210 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27212 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27214 RTX_FRAME_RELATED_P (insn) = 1;
27215 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27216 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27217 sp_reg_rtx, cr_off + sp_off));
27219 cr_off += reg_size;
27223 /* Update stack and set back pointer unless this is V.4,
27224 for which it was done previously. */
27225 if (!WORLD_SAVE_P (info) && info->push_p
27226 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27228 rtx ptr_reg = NULL;
27229 int ptr_off = 0;
27231 /* If we are saving AltiVec regs, we need to be able to address
27232 all save locations using a 16-bit offset. */
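/* D-form loads and stores take a signed 16-bit displacement, hence
   the 32767 limits tested below; when the save area may lie beyond
   that reach we set up a dedicated pointer register instead.  */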
27233 if ((strategy & SAVE_INLINE_VRS) == 0
27234 || (info->altivec_size != 0
27235 && (info->altivec_save_offset + info->altivec_size - 16
27236 + info->total_size - frame_off) > 32767)
27237 || (info->vrsave_size != 0
27238 && (info->vrsave_save_offset
27239 + info->total_size - frame_off) > 32767))
27241 int sel = SAVRES_SAVE | SAVRES_VR;
27242 unsigned ptr_regno = ptr_regno_for_savres (sel);
27244 if (using_static_chain_p
27245 && ptr_regno == STATIC_CHAIN_REGNUM)
27246 ptr_regno = 12;
27247 if (REGNO (frame_reg_rtx) != ptr_regno)
27248 START_USE (ptr_regno);
27249 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27250 frame_reg_rtx = ptr_reg;
27251 ptr_off = info->altivec_save_offset + info->altivec_size;
27252 frame_off = -ptr_off;
27254 else if (REGNO (frame_reg_rtx) == 1)
27255 frame_off = info->total_size;
27256 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27257 ptr_reg, ptr_off);
27258 if (REGNO (frame_reg_rtx) == 12)
27259 sp_adjust = 0;
27260 sp_off = info->total_size;
27261 if (frame_reg_rtx != sp_reg_rtx)
27262 rs6000_emit_stack_tie (frame_reg_rtx, false);
27265 /* Set frame pointer, if needed. */
27266 if (frame_pointer_needed)
27268 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27269 sp_reg_rtx);
27270 RTX_FRAME_RELATED_P (insn) = 1;
27273 /* Save AltiVec registers if needed. Save here because the red zone does
27274 not always include AltiVec registers. */
27275 if (!WORLD_SAVE_P (info)
27276 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27278 int end_save = info->altivec_save_offset + info->altivec_size;
27279 int ptr_off;
27280 /* Oddly, the vector save/restore functions point r0 at the end
27281 of the save area, then use r11 or r12 to load offsets for
27282 [reg+reg] addressing. */
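/* Illustratively: r0 is set to frame_reg + end_save below, and the
   out-of-line routine then forms each slot address as r0 plus a
   negative offset staged in r11 or r12, matching the [reg+reg]
   addressing of stvx (the exact offsets depend on the size of the
   save area).  */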
27283 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27284 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27285 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27287 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27288 NOT_INUSE (0);
27289 if (scratch_regno == 12)
27290 sp_adjust = 0;
27291 if (end_save + frame_off != 0)
27293 rtx offset = GEN_INT (end_save + frame_off);
27295 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27297 else
27298 emit_move_insn (ptr_reg, frame_reg_rtx);
27300 ptr_off = -end_save;
27301 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27302 info->altivec_save_offset + ptr_off,
27303 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27304 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27305 NULL_RTX, NULL_RTX);
27306 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27308 /* The oddity mentioned above clobbered our frame reg. */
27309 emit_move_insn (frame_reg_rtx, ptr_reg);
27310 frame_off = ptr_off;
27313 else if (!WORLD_SAVE_P (info)
27314 && info->altivec_size != 0)
27316 int i;
27318 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27319 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27321 rtx areg, savereg, mem;
27322 HOST_WIDE_INT offset;
27324 offset = (info->altivec_save_offset + frame_off
27325 + 16 * (i - info->first_altivec_reg_save));
27327 savereg = gen_rtx_REG (V4SImode, i);
27329 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27331 mem = gen_frame_mem (V4SImode,
27332 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27333 GEN_INT (offset)));
27334 insn = emit_insn (gen_rtx_SET (mem, savereg));
27335 areg = NULL_RTX;
27337 else
27339 NOT_INUSE (0);
27340 areg = gen_rtx_REG (Pmode, 0);
27341 emit_move_insn (areg, GEN_INT (offset));
27343 /* AltiVec addressing mode is [reg+reg]. */
27344 mem = gen_frame_mem (V4SImode,
27345 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27347 /* Rather than emitting a generic move, force use of the stvx
27348 instruction, which we always want on ISA 2.07 (power8) systems.
27349 In particular we don't want xxpermdi/stxvd2x for little
27350 endian. */
27351 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27354 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27355 areg, GEN_INT (offset));
27359 /* VRSAVE is a bit vector representing which AltiVec registers
27360 are used. The OS uses this to determine which vector
27361 registers to save on a context switch. We need to save
27362 VRSAVE on the stack frame, add whatever AltiVec registers we
27363 used in this function, and do the corresponding magic in the
27364 epilogue. */
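/* VRSAVE holds one bit per vector register; under the usual MSB-first
   convention v0 maps to bit 0x80000000, so a function touching only
   v20-v31 would contribute the mask 0x00000fff.  */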
27366 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27368 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27369 be using r12 as frame_reg_rtx and r11 as the static chain
27370 pointer for nested functions. */
27371 int save_regno = 12;
27372 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27373 && !using_static_chain_p)
27374 save_regno = 11;
27375 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27377 save_regno = 11;
27378 if (using_static_chain_p)
27379 save_regno = 0;
27381 NOT_INUSE (save_regno);
27383 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27386 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27387 if (!TARGET_SINGLE_PIC_BASE
27388 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27389 && !constant_pool_empty_p ())
27390 || (DEFAULT_ABI == ABI_V4
27391 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27392 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27394 /* If emit_load_toc_table will use the link register, we need to save
27395 it. We use R12 for this purpose because emit_load_toc_table
27396 can use register 0. This allows us to use a plain 'blr' to return
27397 from the procedure more often. */
27398 int save_LR_around_toc_setup = (TARGET_ELF
27399 && DEFAULT_ABI == ABI_V4
27400 && flag_pic
27401 && ! info->lr_save_p
27402 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27403 if (save_LR_around_toc_setup)
27405 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27406 rtx tmp = gen_rtx_REG (Pmode, 12);
27408 sp_adjust = 0;
27409 insn = emit_move_insn (tmp, lr);
27410 RTX_FRAME_RELATED_P (insn) = 1;
27412 rs6000_emit_load_toc_table (TRUE);
27414 insn = emit_move_insn (lr, tmp);
27415 add_reg_note (insn, REG_CFA_RESTORE, lr);
27416 RTX_FRAME_RELATED_P (insn) = 1;
27418 else
27419 rs6000_emit_load_toc_table (TRUE);
27422 #if TARGET_MACHO
27423 if (!TARGET_SINGLE_PIC_BASE
27424 && DEFAULT_ABI == ABI_DARWIN
27425 && flag_pic && crtl->uses_pic_offset_table)
27427 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27428 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27430 /* Save and restore LR locally around this call (in R0). */
27431 if (!info->lr_save_p)
27432 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27434 emit_insn (gen_load_macho_picbase (src));
27436 emit_move_insn (gen_rtx_REG (Pmode,
27437 RS6000_PIC_OFFSET_TABLE_REGNUM),
27438 lr);
27440 if (!info->lr_save_p)
27441 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27443 #endif
27445 /* If we need to, save the TOC register after doing the stack setup.
27446 Do not emit eh frame info for this save. The unwinder wants info,
27447 conceptually attached to instructions in this function, about
27448 register values in the caller of this function. This R2 may have
27449 already been changed from the value in the caller.
27450 We don't attempt to write accurate DWARF EH frame info for R2
27451 because code emitted by gcc for a (non-pointer) function call
27452 doesn't save and restore R2. Instead, R2 is managed out-of-line
27453 by a linker generated plt call stub when the function resides in
27454 a shared library. This behavior is costly to describe in DWARF,
27455 both in terms of the size of DWARF info and the time taken in the
27456 unwinder to interpret it. R2 changes, apart from the
27457 calls_eh_return case earlier in this function, are handled by
27458 linux-unwind.h frob_update_context. */
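/* For reference, taking 64-bit ELFv1 as an example: a linker-generated
   call stub typically starts with "std 2,40(1)" before jumping via the
   count register, and the linker turns the "nop" after the "bl" into
   "ld 2,40(1)" so the caller gets its TOC pointer back.  */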
27459 if (rs6000_save_toc_in_prologue_p ())
27461 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27462 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27465 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27466 if (using_split_stack && split_stack_arg_pointer_used_p ())
27467 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27470 /* Output .extern statements for the save/restore routines we use. */
27472 static void
27473 rs6000_output_savres_externs (FILE *file)
27475 rs6000_stack_t *info = rs6000_stack_info ();
27477 if (TARGET_DEBUG_STACK)
27478 debug_stack_info (info);
27480 /* Write .extern for any function we will call to save and restore
27481 fp values. */
27482 if (info->first_fp_reg_save < 64
27483 && !TARGET_MACHO
27484 && !TARGET_ELF)
27486 char *name;
27487 int regno = info->first_fp_reg_save - 32;
27489 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27491 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27492 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27493 name = rs6000_savres_routine_name (regno, sel);
27494 fprintf (file, "\t.extern %s\n", name);
27496 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27498 bool lr = (info->savres_strategy
27499 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27500 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27501 name = rs6000_savres_routine_name (regno, sel);
27502 fprintf (file, "\t.extern %s\n", name);
27507 /* Write function prologue. */
27509 static void
27510 rs6000_output_function_prologue (FILE *file)
27512 if (!cfun->is_thunk)
27513 rs6000_output_savres_externs (file);
27515 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27516 immediately after the global entry point label. */
27517 if (rs6000_global_entry_point_needed_p ())
27519 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27521 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27523 if (TARGET_CMODEL != CMODEL_LARGE)
27525 /* In the small and medium code models, we assume the TOC is less
27526 than 2 GB away from the text section, so it can be computed via the
27527 following two-instruction sequence. */
27528 char buf[256];
27530 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27531 fprintf (file, "0:\taddis 2,12,.TOC.-");
27532 assemble_name (file, buf);
27533 fprintf (file, "@ha\n");
27534 fprintf (file, "\taddi 2,2,.TOC.-");
27535 assemble_name (file, buf);
27536 fprintf (file, "@l\n");
27538 else
27540 /* In the large code model, we allow arbitrary offsets between the
27541 TOC and the text section, so we have to load the offset from
27542 memory. The data field is emitted directly before the global
27543 entry point in rs6000_elf_declare_function_name. */
27544 char buf[256];
27546 #ifdef HAVE_AS_ENTRY_MARKERS
27547 /* If supported by the linker, emit a marker relocation. If the
27548 total code size of the final executable or shared library
27549 happens to fit into 2 GB after all, the linker will replace
27550 this code sequence with the sequence for the small or medium
27551 code model. */
27552 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27553 #endif
27554 fprintf (file, "\tld 2,");
27555 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27556 assemble_name (file, buf);
27557 fprintf (file, "-");
27558 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27559 assemble_name (file, buf);
27560 fprintf (file, "(12)\n");
27561 fprintf (file, "\tadd 2,2,12\n");
27564 fputs ("\t.localentry\t", file);
27565 assemble_name (file, name);
27566 fputs (",.-", file);
27567 assemble_name (file, name);
27568 fputs ("\n", file);
27571 /* Output -mprofile-kernel code. This needs to be done here instead of
27572 in output_function_profile since it must go after the ELFv2 ABI
27573 local entry point. */
27574 if (TARGET_PROFILE_KERNEL && crtl->profile)
27576 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27577 gcc_assert (!TARGET_32BIT);
27579 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27581 /* In the ELFv2 ABI we have no compiler stack word. It must be
27582 the responsibility of _mcount to preserve the static chain
27583 register if required. */
27584 if (DEFAULT_ABI != ABI_ELFv2
27585 && cfun->static_chain_decl != NULL)
27587 asm_fprintf (file, "\tstd %s,24(%s)\n",
27588 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27589 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27590 asm_fprintf (file, "\tld %s,24(%s)\n",
27591 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27593 else
27594 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27597 rs6000_pic_labelno++;
27600 /* -mprofile-kernel code calls mcount before the function prologue,
27601 so a profiled leaf function should stay a leaf function. */
27602 static bool
27603 rs6000_keep_leaf_when_profiled ()
27605 return TARGET_PROFILE_KERNEL;
27608 /* Nonzero if VMX regs are restored before the frame pop, zero if
27609 we restore after the pop when possible. */
27610 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27612 /* Restoring cr is a two-step process: loading a reg from the frame
27613 save, then moving the reg to cr. For ABI_V4 we must let the
27614 unwinder know that the stack location is no longer valid at or
27615 before the stack deallocation, but we can't emit a cfa_restore for
27616 cr at the stack deallocation like we do for other registers.
27617 The trouble is that it is possible for the move to cr to be
27618 scheduled after the stack deallocation. So say exactly where cr
27619 is located on each of the two insns. */
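/* Concretely, restoring CR looks like (illustrative):
     lwz rN,offset(r1)   <- load_cr_save
     mtcrf 0x20,rN       <- restore_saved_cr, here for CR2 only
   and for ABI_V4 (when not exiting via an out-of-line routine) the
   REG_CFA_REGISTER note on the load tells the unwinder that CR
   temporarily lives in rN.  */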
27621 static rtx
27622 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27624 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27625 rtx reg = gen_rtx_REG (SImode, regno);
27626 rtx_insn *insn = emit_move_insn (reg, mem);
27628 if (!exit_func && DEFAULT_ABI == ABI_V4)
27630 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27631 rtx set = gen_rtx_SET (reg, cr);
27633 add_reg_note (insn, REG_CFA_REGISTER, set);
27634 RTX_FRAME_RELATED_P (insn) = 1;
27636 return reg;
27639 /* Reload CR from REG. */
27641 static void
27642 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27644 int count = 0;
27645 int i;
27647 if (using_mfcr_multiple)
27649 for (i = 0; i < 8; i++)
27650 if (save_reg_p (CR0_REGNO + i))
27651 count++;
27652 gcc_assert (count);
27655 if (using_mfcr_multiple && count > 1)
27657 rtx_insn *insn;
27658 rtvec p;
27659 int ndx;
27661 p = rtvec_alloc (count);
27663 ndx = 0;
27664 for (i = 0; i < 8; i++)
27665 if (save_reg_p (CR0_REGNO + i))
27667 rtvec r = rtvec_alloc (2);
27668 RTVEC_ELT (r, 0) = reg;
27669 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
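/* E.g. for CR2 (i == 2) this is 1 << 5 == 0x20, the FXM bit that
   makes mtcrf write field 2 only.  */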
27670 RTVEC_ELT (p, ndx) =
27671 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27672 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27673 ndx++;
27675 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27676 gcc_assert (ndx == count);
27678 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27679 CR field separately. */
27680 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27682 for (i = 0; i < 8; i++)
27683 if (save_reg_p (CR0_REGNO + i))
27684 add_reg_note (insn, REG_CFA_RESTORE,
27685 gen_rtx_REG (SImode, CR0_REGNO + i));
27687 RTX_FRAME_RELATED_P (insn) = 1;
27690 else
27691 for (i = 0; i < 8; i++)
27692 if (save_reg_p (CR0_REGNO + i))
27694 rtx insn = emit_insn (gen_movsi_to_cr_one
27695 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27697 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27698 CR field separately, attached to the insn that in fact
27699 restores this particular CR field. */
27700 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27702 add_reg_note (insn, REG_CFA_RESTORE,
27703 gen_rtx_REG (SImode, CR0_REGNO + i));
27705 RTX_FRAME_RELATED_P (insn) = 1;
27709 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27710 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27711 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27713 rtx_insn *insn = get_last_insn ();
27714 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27716 add_reg_note (insn, REG_CFA_RESTORE, cr);
27717 RTX_FRAME_RELATED_P (insn) = 1;
27721 /* Like cr, the move to lr instruction can be scheduled after the
27722 stack deallocation, but unlike cr, its stack frame save is still
27723 valid. So we only need to emit the cfa_restore on the correct
27724 instruction. */
27726 static void
27727 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27729 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27730 rtx reg = gen_rtx_REG (Pmode, regno);
27732 emit_move_insn (reg, mem);
27735 static void
27736 restore_saved_lr (int regno, bool exit_func)
27738 rtx reg = gen_rtx_REG (Pmode, regno);
27739 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27740 rtx_insn *insn = emit_move_insn (lr, reg);
27742 if (!exit_func && flag_shrink_wrap)
27744 add_reg_note (insn, REG_CFA_RESTORE, lr);
27745 RTX_FRAME_RELATED_P (insn) = 1;
27749 static rtx
27750 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27752 if (DEFAULT_ABI == ABI_ELFv2)
27754 int i;
27755 for (i = 0; i < 8; i++)
27756 if (save_reg_p (CR0_REGNO + i))
27758 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27759 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27760 cfa_restores);
27763 else if (info->cr_save_p)
27764 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27765 gen_rtx_REG (SImode, CR2_REGNO),
27766 cfa_restores);
27768 if (info->lr_save_p)
27769 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27770 gen_rtx_REG (Pmode, LR_REGNO),
27771 cfa_restores);
27772 return cfa_restores;
27775 /* Return true if OFFSET from stack pointer can be clobbered by signals.
27776 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
27777 below the stack pointer that are not clobbered by signals. */
27779 static inline bool
27780 offset_below_red_zone_p (HOST_WIDE_INT offset)
27782 return offset < (DEFAULT_ABI == ABI_V4
27783 ? 0
27784 : TARGET_32BIT ? -220 : -288);
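/* Worked example: on 64-bit AIX/ELF the red zone is 288 bytes, so an
   offset of -288 returns false (still protected) while -289 returns
   true; under ABI_V4 any negative offset can be clobbered.  */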
27787 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27789 static void
27790 emit_cfa_restores (rtx cfa_restores)
27792 rtx_insn *insn = get_last_insn ();
27793 rtx *loc = &REG_NOTES (insn);
27795 while (*loc)
27796 loc = &XEXP (*loc, 1);
27797 *loc = cfa_restores;
27798 RTX_FRAME_RELATED_P (insn) = 1;
27801 /* Emit function epilogue as insns. */
27803 void
27804 rs6000_emit_epilogue (int sibcall)
27806 rs6000_stack_t *info;
27807 int restoring_GPRs_inline;
27808 int restoring_FPRs_inline;
27809 int using_load_multiple;
27810 int using_mtcr_multiple;
27811 int use_backchain_to_restore_sp;
27812 int restore_lr;
27813 int strategy;
27814 HOST_WIDE_INT frame_off = 0;
27815 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27816 rtx frame_reg_rtx = sp_reg_rtx;
27817 rtx cfa_restores = NULL_RTX;
27818 rtx insn;
27819 rtx cr_save_reg = NULL_RTX;
27820 machine_mode reg_mode = Pmode;
27821 int reg_size = TARGET_32BIT ? 4 : 8;
27822 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
27823 ? DFmode : SFmode;
27824 int fp_reg_size = 8;
27825 int i;
27826 bool exit_func;
27827 unsigned ptr_regno;
27829 info = rs6000_stack_info ();
27831 strategy = info->savres_strategy;
27832 using_load_multiple = strategy & REST_MULTIPLE;
27833 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27834 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27835 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
27836 || rs6000_cpu == PROCESSOR_PPC603
27837 || rs6000_cpu == PROCESSOR_PPC750
27838 || optimize_size);
27839 /* Restore via the backchain when we have a large frame, since this
27840 is more efficient than an addis, addi pair. The second condition
27841 here will not trigger at the moment; we don't actually need a
27842 frame pointer for alloca, but the generic parts of the compiler
27843 give us one anyway. */
27844 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
27845 ? info->lr_save_offset
27846 : 0) > 32767
27847 || (cfun->calls_alloca
27848 && !frame_pointer_needed));
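/* E.g. a 40000-byte frame cannot be deallocated with a single addi
   (signed 16-bit immediate), so we instead reload the old stack
   pointer from the backchain word at 0(r1).  */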
27849 restore_lr = (info->lr_save_p
27850 && (restoring_FPRs_inline
27851 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27852 && (restoring_GPRs_inline
27853 || info->first_fp_reg_save < 64)
27854 && !cfun->machine->lr_is_wrapped_separately);
27857 if (WORLD_SAVE_P (info))
27859 int i, j;
27860 char rname[30];
27861 const char *alloc_rname;
27862 rtvec p;
27864 /* eh_rest_world_r10 will return to the location saved in the LR
27865 stack slot (which is not likely to be our caller).
27866 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27867 rest_world is similar, except any R10 parameter is ignored.
27868 The exception-handling stuff that was here in 2.95 is no
27869 longer necessary. */
27871 p = rtvec_alloc (9
27872 + 32 - info->first_gp_reg_save
27873 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27874 + 63 + 1 - info->first_fp_reg_save);
27876 strcpy (rname, ((crtl->calls_eh_return) ?
27877 "*eh_rest_world_r10" : "*rest_world"));
27878 alloc_rname = ggc_strdup (rname);
27880 j = 0;
27881 RTVEC_ELT (p, j++) = ret_rtx;
27882 RTVEC_ELT (p, j++)
27883 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
27884 /* The instruction pattern requires a clobber here;
27885 it is shared with the restVEC helper. */
27886 RTVEC_ELT (p, j++)
27887 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
27890 /* CR register traditionally saved as CR2. */
27891 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27892 RTVEC_ELT (p, j++)
27893 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27894 if (flag_shrink_wrap)
27896 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27897 gen_rtx_REG (Pmode, LR_REGNO),
27898 cfa_restores);
27899 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27903 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27905 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27906 RTVEC_ELT (p, j++)
27907 = gen_frame_load (reg,
27908 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27909 if (flag_shrink_wrap
27910 && save_reg_p (info->first_gp_reg_save + i))
27911 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27913 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27915 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27916 RTVEC_ELT (p, j++)
27917 = gen_frame_load (reg,
27918 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27919 if (flag_shrink_wrap
27920 && save_reg_p (info->first_altivec_reg_save + i))
27921 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27923 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27925 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
27926 ? DFmode : SFmode),
27927 info->first_fp_reg_save + i);
27928 RTVEC_ELT (p, j++)
27929 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
27930 if (flag_shrink_wrap
27931 && save_reg_p (info->first_fp_reg_save + i))
27932 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27934 RTVEC_ELT (p, j++)
27935 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
27936 RTVEC_ELT (p, j++)
27937 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
27938 RTVEC_ELT (p, j++)
27939 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
27940 RTVEC_ELT (p, j++)
27941 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
27942 RTVEC_ELT (p, j++)
27943 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
27944 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
27946 if (flag_shrink_wrap)
27948 REG_NOTES (insn) = cfa_restores;
27949 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27950 RTX_FRAME_RELATED_P (insn) = 1;
27952 return;
27955 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27956 if (info->push_p)
27957 frame_off = info->total_size;
27959 /* Restore AltiVec registers if we must do so before adjusting the
27960 stack. */
27961 if (info->altivec_size != 0
27962 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27963 || (DEFAULT_ABI != ABI_V4
27964 && offset_below_red_zone_p (info->altivec_save_offset))))
27966 int i;
27967 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
27969 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27970 if (use_backchain_to_restore_sp)
27972 int frame_regno = 11;
27974 if ((strategy & REST_INLINE_VRS) == 0)
27976 /* Of r11 and r12, select the one not clobbered by an
27977 out-of-line restore function for the frame register. */
27978 frame_regno = 11 + 12 - scratch_regno;
27980 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
27981 emit_move_insn (frame_reg_rtx,
27982 gen_rtx_MEM (Pmode, sp_reg_rtx));
27983 frame_off = 0;
27985 else if (frame_pointer_needed)
27986 frame_reg_rtx = hard_frame_pointer_rtx;
27988 if ((strategy & REST_INLINE_VRS) == 0)
27990 int end_save = info->altivec_save_offset + info->altivec_size;
27991 int ptr_off;
27992 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27993 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27995 if (end_save + frame_off != 0)
27997 rtx offset = GEN_INT (end_save + frame_off);
27999 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28001 else
28002 emit_move_insn (ptr_reg, frame_reg_rtx);
28004 ptr_off = -end_save;
28005 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28006 info->altivec_save_offset + ptr_off,
28007 0, V4SImode, SAVRES_VR);
28009 else
28011 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28012 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28014 rtx addr, areg, mem, insn;
28015 rtx reg = gen_rtx_REG (V4SImode, i);
28016 HOST_WIDE_INT offset
28017 = (info->altivec_save_offset + frame_off
28018 + 16 * (i - info->first_altivec_reg_save));
28020 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28022 mem = gen_frame_mem (V4SImode,
28023 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28024 GEN_INT (offset)));
28025 insn = gen_rtx_SET (reg, mem);
28027 else
28029 areg = gen_rtx_REG (Pmode, 0);
28030 emit_move_insn (areg, GEN_INT (offset));
28032 /* AltiVec addressing mode is [reg+reg]. */
28033 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28034 mem = gen_frame_mem (V4SImode, addr);
28036 /* Rather than emitting a generic move, force use of the
28037 lvx instruction, which we always want. In particular we
28038 don't want lxvd2x/xxpermdi for little endian. */
28039 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28042 (void) emit_insn (insn);
28046 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28047 if (((strategy & REST_INLINE_VRS) == 0
28048 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28049 && (flag_shrink_wrap
28050 || (offset_below_red_zone_p
28051 (info->altivec_save_offset
28052 + 16 * (i - info->first_altivec_reg_save))))
28053 && save_reg_p (i))
28055 rtx reg = gen_rtx_REG (V4SImode, i);
28056 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28060 /* Restore VRSAVE if we must do so before adjusting the stack. */
28061 if (info->vrsave_size != 0
28062 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28063 || (DEFAULT_ABI != ABI_V4
28064 && offset_below_red_zone_p (info->vrsave_save_offset))))
28066 rtx reg;
28068 if (frame_reg_rtx == sp_reg_rtx)
28070 if (use_backchain_to_restore_sp)
28072 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28073 emit_move_insn (frame_reg_rtx,
28074 gen_rtx_MEM (Pmode, sp_reg_rtx));
28075 frame_off = 0;
28077 else if (frame_pointer_needed)
28078 frame_reg_rtx = hard_frame_pointer_rtx;
28081 reg = gen_rtx_REG (SImode, 12);
28082 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28083 info->vrsave_save_offset + frame_off));
28085 emit_insn (generate_set_vrsave (reg, info, 1));
28088 insn = NULL_RTX;
28089 /* If we have a large stack frame, restore the old stack pointer
28090 using the backchain. */
28091 if (use_backchain_to_restore_sp)
28093 if (frame_reg_rtx == sp_reg_rtx)
28095 /* Under V.4, don't reset the stack pointer until after we're done
28096 loading the saved registers. */
28097 if (DEFAULT_ABI == ABI_V4)
28098 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28100 insn = emit_move_insn (frame_reg_rtx,
28101 gen_rtx_MEM (Pmode, sp_reg_rtx));
28102 frame_off = 0;
28104 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28105 && DEFAULT_ABI == ABI_V4)
28106 /* frame_reg_rtx has been set up by the altivec restore. */
28108 else
28110 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28111 frame_reg_rtx = sp_reg_rtx;
28114 /* If we have a frame pointer, we can restore the old stack pointer
28115 from it. */
28116 else if (frame_pointer_needed)
28118 frame_reg_rtx = sp_reg_rtx;
28119 if (DEFAULT_ABI == ABI_V4)
28120 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28121 /* Prevent reordering memory accesses against stack pointer restore. */
28122 else if (cfun->calls_alloca
28123 || offset_below_red_zone_p (-info->total_size))
28124 rs6000_emit_stack_tie (frame_reg_rtx, true);
28126 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28127 GEN_INT (info->total_size)));
28128 frame_off = 0;
28130 else if (info->push_p
28131 && DEFAULT_ABI != ABI_V4
28132 && !crtl->calls_eh_return)
28134 /* Prevent reordering memory accesses against stack pointer restore. */
28135 if (cfun->calls_alloca
28136 || offset_below_red_zone_p (-info->total_size))
28137 rs6000_emit_stack_tie (frame_reg_rtx, false);
28138 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28139 GEN_INT (info->total_size)));
28140 frame_off = 0;
28142 if (insn && frame_reg_rtx == sp_reg_rtx)
28144 if (cfa_restores)
28146 REG_NOTES (insn) = cfa_restores;
28147 cfa_restores = NULL_RTX;
28149 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28150 RTX_FRAME_RELATED_P (insn) = 1;
28153 /* Restore AltiVec registers if we have not done so already. */
28154 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28155 && info->altivec_size != 0
28156 && (DEFAULT_ABI == ABI_V4
28157 || !offset_below_red_zone_p (info->altivec_save_offset)))
28159 int i;
28161 if ((strategy & REST_INLINE_VRS) == 0)
28163 int end_save = info->altivec_save_offset + info->altivec_size;
28164 int ptr_off;
28165 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28166 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28167 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28169 if (end_save + frame_off != 0)
28171 rtx offset = GEN_INT (end_save + frame_off);
28173 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28175 else
28176 emit_move_insn (ptr_reg, frame_reg_rtx);
28178 ptr_off = -end_save;
28179 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28180 info->altivec_save_offset + ptr_off,
28181 0, V4SImode, SAVRES_VR);
28182 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28184 /* Frame reg was clobbered by out-of-line save. Restore it
28185 from ptr_reg, and if we are calling an out-of-line gpr or
28186 fpr restore, set up the correct pointer and offset. */
28187 unsigned newptr_regno = 1;
28188 if (!restoring_GPRs_inline)
28190 bool lr = info->gp_save_offset + info->gp_size == 0;
28191 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28192 newptr_regno = ptr_regno_for_savres (sel);
28193 end_save = info->gp_save_offset + info->gp_size;
28195 else if (!restoring_FPRs_inline)
28197 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28198 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28199 newptr_regno = ptr_regno_for_savres (sel);
28200 end_save = info->fp_save_offset + info->fp_size;
28203 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28204 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28206 if (end_save + ptr_off != 0)
28208 rtx offset = GEN_INT (end_save + ptr_off);
28210 frame_off = -end_save;
28211 if (TARGET_32BIT)
28212 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28213 ptr_reg, offset));
28214 else
28215 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28216 ptr_reg, offset));
28218 else
28220 frame_off = ptr_off;
28221 emit_move_insn (frame_reg_rtx, ptr_reg);
28225 else
28227 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28228 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28230 rtx addr, areg, mem, insn;
28231 rtx reg = gen_rtx_REG (V4SImode, i);
28232 HOST_WIDE_INT offset
28233 = (info->altivec_save_offset + frame_off
28234 + 16 * (i - info->first_altivec_reg_save));
28236 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28238 mem = gen_frame_mem (V4SImode,
28239 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28240 GEN_INT (offset)));
28241 insn = gen_rtx_SET (reg, mem);
28243 else
28245 areg = gen_rtx_REG (Pmode, 0);
28246 emit_move_insn (areg, GEN_INT (offset));
28248 /* AltiVec addressing mode is [reg+reg]. */
28249 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28250 mem = gen_frame_mem (V4SImode, addr);
28252 /* Rather than emitting a generic move, force use of the
28253 lvx instruction, which we always want. In particular we
28254 don't want lxvd2x/xxpermdi for little endian. */
28255 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28258 (void) emit_insn (insn);
28262 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28263 if (((strategy & REST_INLINE_VRS) == 0
28264 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28265 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28266 && save_reg_p (i))
28268 rtx reg = gen_rtx_REG (V4SImode, i);
28269 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28273 /* Restore VRSAVE if we have not done so already. */
28274 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28275 && info->vrsave_size != 0
28276 && (DEFAULT_ABI == ABI_V4
28277 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28279 rtx reg;
28281 reg = gen_rtx_REG (SImode, 12);
28282 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28283 info->vrsave_save_offset + frame_off));
28285 emit_insn (generate_set_vrsave (reg, info, 1));
28288 /* If we exit by an out-of-line restore function on ABI_V4 then that
28289 function will deallocate the stack, so we don't need to worry
28290 about the unwinder restoring cr from an invalid stack frame
28291 location. */
28292 exit_func = (!restoring_FPRs_inline
28293 || (!restoring_GPRs_inline
28294 && info->first_fp_reg_save == 64));
28296 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28297 *separate* slots if the routine calls __builtin_eh_return, so
28298 that they can be independently restored by the unwinder. */
28299 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28301 int i, cr_off = info->ehcr_offset;
28303 for (i = 0; i < 8; i++)
28304 if (!call_used_regs[CR0_REGNO + i])
28306 rtx reg = gen_rtx_REG (SImode, 0);
28307 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28308 cr_off + frame_off));
28310 insn = emit_insn (gen_movsi_to_cr_one
28311 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28313 if (!exit_func && flag_shrink_wrap)
28315 add_reg_note (insn, REG_CFA_RESTORE,
28316 gen_rtx_REG (SImode, CR0_REGNO + i));
28318 RTX_FRAME_RELATED_P (insn) = 1;
28321 cr_off += reg_size;
28325 /* Get the old lr if we saved it. If we are restoring registers
28326 out-of-line, then the out-of-line routines can do this for us. */
28327 if (restore_lr && restoring_GPRs_inline)
28328 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28330 /* Get the old cr if we saved it. */
28331 if (info->cr_save_p)
28333 unsigned cr_save_regno = 12;
28335 if (!restoring_GPRs_inline)
28337 /* Ensure we don't use the register used by the out-of-line
28338 gpr register restore below. */
28339 bool lr = info->gp_save_offset + info->gp_size == 0;
28340 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28341 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28343 if (gpr_ptr_regno == 12)
28344 cr_save_regno = 11;
28345 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28347 else if (REGNO (frame_reg_rtx) == 12)
28348 cr_save_regno = 11;
28350 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28351 info->cr_save_offset + frame_off,
28352 exit_func);
28355 /* Set LR here to try to overlap restores below. */
28356 if (restore_lr && restoring_GPRs_inline)
28357 restore_saved_lr (0, exit_func);
28359 /* Load exception handler data registers, if needed. */
28360 if (crtl->calls_eh_return)
28362 unsigned int i, regno;
28364 if (TARGET_AIX)
28366 rtx reg = gen_rtx_REG (reg_mode, 2);
28367 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28368 frame_off + RS6000_TOC_SAVE_SLOT));
28371 for (i = 0; ; ++i)
28373 rtx mem;
28375 regno = EH_RETURN_DATA_REGNO (i);
28376 if (regno == INVALID_REGNUM)
28377 break;
28379 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28380 info->ehrd_offset + frame_off
28381 + reg_size * (int) i);
28383 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28387 /* Restore GPRs. This is done as a PARALLEL if we are using
28388 the load-multiple instructions. */
28389 if (!restoring_GPRs_inline)
28391 /* We are jumping to an out-of-line function. */
28392 rtx ptr_reg;
28393 int end_save = info->gp_save_offset + info->gp_size;
28394 bool can_use_exit = end_save == 0;
28395 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28396 int ptr_off;
28398 /* Emit stack reset code if we need it. */
28399 ptr_regno = ptr_regno_for_savres (sel);
28400 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28401 if (can_use_exit)
28402 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28403 else if (end_save + frame_off != 0)
28404 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28405 GEN_INT (end_save + frame_off)));
28406 else if (REGNO (frame_reg_rtx) != ptr_regno)
28407 emit_move_insn (ptr_reg, frame_reg_rtx);
28408 if (REGNO (frame_reg_rtx) == ptr_regno)
28409 frame_off = -end_save;
28411 if (can_use_exit && info->cr_save_p)
28412 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28414 ptr_off = -end_save;
28415 rs6000_emit_savres_rtx (info, ptr_reg,
28416 info->gp_save_offset + ptr_off,
28417 info->lr_save_offset + ptr_off,
28418 reg_mode, sel);
28420 else if (using_load_multiple)
28422 rtvec p;
28423 p = rtvec_alloc (32 - info->first_gp_reg_save);
28424 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28425 RTVEC_ELT (p, i)
28426 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28427 frame_reg_rtx,
28428 info->gp_save_offset + frame_off + reg_size * i);
28429 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28431 else
28433 int offset = info->gp_save_offset + frame_off;
28434 for (i = info->first_gp_reg_save; i < 32; i++)
28436 if (save_reg_p (i)
28437 && !cfun->machine->gpr_is_wrapped_separately[i])
28439 rtx reg = gen_rtx_REG (reg_mode, i);
28440 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28443 offset += reg_size;
28447 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28449 /* If the frame pointer was used then we can't delay emitting
28450 a REG_CFA_DEF_CFA note. This must happen on the insn that
28451 restores the frame pointer, r31. We may have already emitted
28452 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28453 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28454 be harmless if emitted. */
28455 if (frame_pointer_needed)
28457 insn = get_last_insn ();
28458 add_reg_note (insn, REG_CFA_DEF_CFA,
28459 plus_constant (Pmode, frame_reg_rtx, frame_off));
28460 RTX_FRAME_RELATED_P (insn) = 1;
28463 /* Set up cfa_restores. We always need these when
28464 shrink-wrapping. If not shrink-wrapping then we only need
28465 the cfa_restore when the stack location is no longer valid.
28466 The cfa_restores must be emitted on or before the insn that
28467 invalidates the stack, and of course must not be emitted
28468 before the insn that actually does the restore. The latter
28469 is why it is a bad idea to emit the cfa_restores as a group
28470 on the last instruction here that actually does a restore:
28471 That insn may be reordered with respect to others doing
28472 restores. */
28473 if (flag_shrink_wrap
28474 && !restoring_GPRs_inline
28475 && info->first_fp_reg_save == 64)
28476 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28478 for (i = info->first_gp_reg_save; i < 32; i++)
28479 if (save_reg_p (i)
28480 && !cfun->machine->gpr_is_wrapped_separately[i])
28482 rtx reg = gen_rtx_REG (reg_mode, i);
28483 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28487 if (!restoring_GPRs_inline
28488 && info->first_fp_reg_save == 64)
28490 /* We are jumping to an out-of-line function. */
28491 if (cfa_restores)
28492 emit_cfa_restores (cfa_restores);
28493 return;
28496 if (restore_lr && !restoring_GPRs_inline)
28498 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28499 restore_saved_lr (0, exit_func);
28502 /* Restore fpr's if we need to do it without calling a function. */
28503 if (restoring_FPRs_inline)
28505 int offset = info->fp_save_offset + frame_off;
28506 for (i = info->first_fp_reg_save; i < 64; i++)
28508 if (save_reg_p (i)
28509 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28511 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28512 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28513 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28514 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28515 cfa_restores);
28518 offset += fp_reg_size;
28522 /* If we saved cr, restore it here. Just those that were used. */
28523 if (info->cr_save_p)
28524 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28526 /* If this is V.4, unwind the stack pointer after all of the loads
28527 have been done, or set up r11 if we are restoring fp out of line. */
28528 ptr_regno = 1;
28529 if (!restoring_FPRs_inline)
28531 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28532 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28533 ptr_regno = ptr_regno_for_savres (sel);
28536 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28537 if (REGNO (frame_reg_rtx) == ptr_regno)
28538 frame_off = 0;
28540 if (insn && restoring_FPRs_inline)
28542 if (cfa_restores)
28544 REG_NOTES (insn) = cfa_restores;
28545 cfa_restores = NULL_RTX;
28547 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28548 RTX_FRAME_RELATED_P (insn) = 1;
28551 if (crtl->calls_eh_return)
28553 rtx sa = EH_RETURN_STACKADJ_RTX;
28554 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28557 if (!sibcall && restoring_FPRs_inline)
28559 if (cfa_restores)
28561 /* We can't hang the cfa_restores off a simple return,
28562 since the shrink-wrap code sometimes uses an existing
28563 return. This means there might be a path from
28564 pre-prologue code to this return, and dwarf2cfi code
28565 wants the eh_frame unwinder state to be the same on
28566 all paths to any point. So we need to emit the
28567 cfa_restores before the return. For -m64 we really
28568 don't need epilogue cfa_restores at all, except for
28569 this irritating dwarf2cfi requirement when
28570 shrink-wrapping; the stack red zone means eh_frame info
28571 from the prologue telling the unwinder to restore
28572 from the stack is perfectly good right to the end of
28573 the function. */
28574 emit_insn (gen_blockage ());
28575 emit_cfa_restores (cfa_restores);
28576 cfa_restores = NULL_RTX;
28579 emit_jump_insn (targetm.gen_simple_return ());
28582 if (!sibcall && !restoring_FPRs_inline)
28584 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28585 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28586 int elt = 0;
28587 RTVEC_ELT (p, elt++) = ret_rtx;
28588 if (lr)
28589 RTVEC_ELT (p, elt++)
28590 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
28592 /* We have to restore more than two FP registers, so branch to the
28593 restore function. It will return to our caller. */
28594 int i;
28595 int reg;
28596 rtx sym;
28598 if (flag_shrink_wrap)
28599 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28601 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28602 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28603 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28604 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28606 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28608 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28610 RTVEC_ELT (p, elt++)
28611 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28612 if (flag_shrink_wrap
28613 && save_reg_p (info->first_fp_reg_save + i))
28614 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28617 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28620 if (cfa_restores)
28622 if (sibcall)
28623 /* Ensure the cfa_restores are hung off an insn that won't
28624 be reordered above other restores. */
28625 emit_insn (gen_blockage ());
28627 emit_cfa_restores (cfa_restores);
28631 /* Write function epilogue. */
28633 static void
28634 rs6000_output_function_epilogue (FILE *file)
28636 #if TARGET_MACHO
28637 macho_branch_islands ();
28640 rtx_insn *insn = get_last_insn ();
28641 rtx_insn *deleted_debug_label = NULL;
28643 /* Mach-O doesn't support labels at the end of objects, so if
28644 it looks like we might want one, take special action.
28646 First, collect any sequence of deleted debug labels. */
28647 while (insn
28648 && NOTE_P (insn)
28649 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28651 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28652 notes; instead set their CODE_LABEL_NUMBER to -1,
28653 otherwise there would be code generation differences
28654 between -g and -g0. */
28655 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28656 deleted_debug_label = insn;
28657 insn = PREV_INSN (insn);
28660 /* Second, if we have:
28661 label:
28662 barrier
28663 then this needs to be detected, so skip past the barrier. */
28665 if (insn && BARRIER_P (insn))
28666 insn = PREV_INSN (insn);
28668 /* Up to now we've only seen notes or barriers. */
28669 if (insn)
28671 if (LABEL_P (insn)
28672 || (NOTE_P (insn)
28673 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28674 /* Trailing label: <barrier>. */
28675 fputs ("\tnop\n", file);
28676 else
28678 /* Lastly, see if we have a completely empty function body. */
28679 while (insn && ! INSN_P (insn))
28680 insn = PREV_INSN (insn);
28681 /* If we don't find any insns, we've got an empty function body;
28682 i.e. completely empty, without a return or branch. This is
28683 taken as the case where a function body has been removed
28684 because it contains an inline __builtin_unreachable(). GCC
28685 states that reaching __builtin_unreachable() means UB so we're
28686 not obliged to do anything special; however, we want
28687 non-zero-sized function bodies. To meet this, and help the
28688 user out, let's trap the case. */
28689 if (insn == NULL)
28690 fputs ("\ttrap\n", file);
28693 else if (deleted_debug_label)
28694 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28695 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28696 CODE_LABEL_NUMBER (insn) = -1;
28698 #endif
28700 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28701 on its format.
28703 We don't output a traceback table if -finhibit-size-directive was
28704 used. The documentation for -finhibit-size-directive reads
28705 ``don't output a @code{.size} assembler directive, or anything
28706 else that would cause trouble if the function is split in the
28707 middle, and the two halves are placed at locations far apart in
28708 memory.'' The traceback table has this property, since it
28709 includes the offset from the start of the function to the
28710 traceback table itself.
28712 System V.4 Powerpc's (and the embedded ABI derived from it) use a
28713 different traceback table. */
28714 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28715 && ! flag_inhibit_size_directive
28716 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28718 const char *fname = NULL;
28719 const char *language_string = lang_hooks.name;
28720 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28721 int i;
28722 int optional_tbtab;
28723 rs6000_stack_t *info = rs6000_stack_info ();
28725 if (rs6000_traceback == traceback_full)
28726 optional_tbtab = 1;
28727 else if (rs6000_traceback == traceback_part)
28728 optional_tbtab = 0;
28729 else
28730 optional_tbtab = !optimize_size && !TARGET_ELF;
28732 if (optional_tbtab)
28734 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28735 while (*fname == '.') /* V.4 encodes . in the name */
28736 fname++;
28738 /* Need label immediately before tbtab, so we can compute
28739 its offset from the function start. */
28740 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28741 ASM_OUTPUT_LABEL (file, fname);
28744 /* The .tbtab pseudo-op can only be used for the first eight
28745 expressions, since it can't handle the possibly variable
28746 length fields that follow. However, if you omit the optional
28747 fields, the assembler outputs zeros for all optional fields
28748 anyway, giving each variable-length field its minimum length
28749 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28750 pseudo-op at all. */
28752 /* An all-zero word flags the start of the tbtab, for debuggers
28753 that have to find it by searching forward from the entry
28754 point or from the current pc. */
28755 fputs ("\t.long 0\n", file);
28757 /* Tbtab format type. Use format type 0. */
28758 fputs ("\t.byte 0,", file);
28760 /* Language type. Unfortunately, there does not seem to be any
28761 official way to discover the language being compiled, so we
28762 use language_string.
28763 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
28764 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28765 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
28766 either, so for now use 0. */
28767 if (lang_GNU_C ()
28768 || ! strcmp (language_string, "GNU GIMPLE")
28769 || ! strcmp (language_string, "GNU Go")
28770 || ! strcmp (language_string, "libgccjit"))
28771 i = 0;
28772 else if (! strcmp (language_string, "GNU F77")
28773 || lang_GNU_Fortran ())
28774 i = 1;
28775 else if (! strcmp (language_string, "GNU Pascal"))
28776 i = 2;
28777 else if (! strcmp (language_string, "GNU Ada"))
28778 i = 3;
28779 else if (lang_GNU_CXX ()
28780 || ! strcmp (language_string, "GNU Objective-C++"))
28781 i = 9;
28782 else if (! strcmp (language_string, "GNU Java"))
28783 i = 13;
28784 else if (! strcmp (language_string, "GNU Objective-C"))
28785 i = 14;
28786 else
28787 gcc_unreachable ();
28788 fprintf (file, "%d,", i);
28790 /* 8 single bit fields: global linkage (not set for C extern linkage,
28791 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28792 from start of procedure stored in tbtab, internal function, function
28793 has controlled storage, function has no toc, function uses fp,
28794 function logs/aborts fp operations. */
28795 /* Assume that fp operations are used if any fp reg must be saved. */
28796 fprintf (file, "%d,",
28797 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28799 /* 6 bitfields: function is interrupt handler, name present in
28800 proc table, function calls alloca, on condition directives
28801 (controls stack walks, 3 bits), saves condition reg, saves
28802 link reg. */
28803 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28804 set up as a frame pointer, even when there is no alloca call. */
28805 fprintf (file, "%d,",
28806 ((optional_tbtab << 6)
28807 | ((optional_tbtab & frame_pointer_needed) << 5)
28808 | (info->cr_save_p << 1)
28809 | (info->lr_save_p)));
28811 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28812 (6 bits). */
28813 fprintf (file, "%d,",
28814 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28816 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28817 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28819 if (optional_tbtab)
28821 /* Compute the parameter info from the function decl argument
28822 list. */
28823 tree decl;
28824 int next_parm_info_bit = 31;
28826 for (decl = DECL_ARGUMENTS (current_function_decl);
28827 decl; decl = DECL_CHAIN (decl))
28829 rtx parameter = DECL_INCOMING_RTL (decl);
28830 machine_mode mode = GET_MODE (parameter);
28832 if (GET_CODE (parameter) == REG)
28834 if (SCALAR_FLOAT_MODE_P (mode))
28836 int bits;
28838 float_parms++;
28840 switch (mode)
28842 case E_SFmode:
28843 case E_SDmode:
28844 bits = 0x2;
28845 break;
28847 case E_DFmode:
28848 case E_DDmode:
28849 case E_TFmode:
28850 case E_TDmode:
28851 case E_IFmode:
28852 case E_KFmode:
28853 bits = 0x3;
28854 break;
28856 default:
28857 gcc_unreachable ();
28860 /* If only one bit will fit, don't or in this entry. */
28861 if (next_parm_info_bit > 0)
28862 parm_info |= (bits << (next_parm_info_bit - 1));
28863 next_parm_info_bit -= 2;
28865 else
28867 fixed_parms += ((GET_MODE_SIZE (mode)
28868 + (UNITS_PER_WORD - 1))
28869 / UNITS_PER_WORD);
28870 next_parm_info_bit -= 1;
28876 /* Number of fixed point parameters. */
28877 /* This is actually the number of words of fixed point parameters; thus
28878 an 8-byte struct counts as 2, and thus the maximum value is 8. */
28879 fprintf (file, "%d,", fixed_parms);
28881 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28882 all on stack. */
28883 /* This is actually the number of fp registers that hold parameters;
28884 and thus the maximum value is 13. */
28885 /* Set parameters on stack bit if parameters are not in their original
28886 registers, regardless of whether they are on the stack? Xlc
28887 seems to set the bit when not optimizing. */
28888 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28890 if (optional_tbtab)
28892 /* Optional fields follow. Some are variable length. */
28894 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28895 float, 11 double float. */
28896 /* There is an entry for each parameter in a register, in the order
28897 that they occur in the parameter list. Any intervening arguments
28898 on the stack are ignored. If the list overflows a long (max
28899 possible length 34 bits) then completely leave off all elements
28900 that don't fit. */
28901 /* Only emit this long if there was at least one parameter. */
28902 if (fixed_parms || float_parms)
28903 fprintf (file, "\t.long %d\n", parm_info);
28905 /* Offset from start of code to tb table. */
28906 fputs ("\t.long ", file);
28907 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28908 RS6000_OUTPUT_BASENAME (file, fname);
28909 putc ('-', file);
28910 rs6000_output_function_entry (file, fname);
28911 putc ('\n', file);
28913 /* Interrupt handler mask. */
28914 /* Omit this long, since we never set the interrupt handler bit
28915 above. */
28917 /* Number of CTL (controlled storage) anchors. */
28918 /* Omit this long, since the has_ctl bit is never set above. */
28920 /* Displacement into stack of each CTL anchor. */
28921 /* Omit this list of longs, because there are no CTL anchors. */
28923 /* Length of function name. */
28924 if (*fname == '*')
28925 ++fname;
28926 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28928 /* Function name. */
28929 assemble_string (fname, strlen (fname));
28931 /* Register for alloca automatic storage; this is always reg 31.
28932 Only emit this if the alloca bit was set above. */
28933 if (frame_pointer_needed)
28934 fputs ("\t.byte 31\n", file);
28936 fputs ("\t.align 2\n", file);
28940 /* Arrange to define .LCTOC1 label, if not already done. */
28941 if (need_toc_init)
28943 need_toc_init = 0;
28944 if (!toc_initialized)
28946 switch_to_section (toc_section);
28947 switch_to_section (current_function_section ());
28952 /* -fsplit-stack support. */
28954 /* A SYMBOL_REF for __morestack. */
28955 static GTY(()) rtx morestack_ref;
28957 static rtx
28958 gen_add3_const (rtx rt, rtx ra, long c)
28960 if (TARGET_64BIT)
28961 return gen_adddi3 (rt, ra, GEN_INT (c));
28962 else
28963 return gen_addsi3 (rt, ra, GEN_INT (c));
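/* Usage sketch (hypothetical operands): build r12 = r1 + C for a
   constant that fits the signed 16-bit addi immediate; gen_add3_const
   picks the DImode or SImode add pattern to match the target word
   size. */
#if 0
  rtx r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx r12 = gen_rtx_REG (Pmode, 12);
  emit_insn (gen_add3_const (r12, r1, -0x8000));
#endif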
28966 /* Emit -fsplit-stack prologue, which goes before the regular function
28967 prologue (at local entry point in the case of ELFv2). */
28969 void
28970 rs6000_expand_split_stack_prologue (void)
28972 rs6000_stack_t *info = rs6000_stack_info ();
28973 unsigned HOST_WIDE_INT allocate;
28974 long alloc_hi, alloc_lo;
28975 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
28976 rtx_insn *insn;
28978 gcc_assert (flag_split_stack && reload_completed);
28980 if (!info->push_p)
28981 return;
28983 if (global_regs[29])
28985 error ("%qs uses register r29", "-fsplit-stack");
28986 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
28987 "conflicts with %qD", global_regs_decl[29]);
28990 allocate = info->total_size;
28991 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
28993 sorry ("stack frame larger than 2G is not supported for %qs", "-fsplit-stack");
28994 return;
28996 if (morestack_ref == NULL_RTX)
28998 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
28999 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29000 | SYMBOL_FLAG_FUNCTION);
29003 r0 = gen_rtx_REG (Pmode, 0);
29004 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29005 r12 = gen_rtx_REG (Pmode, 12);
29006 emit_insn (gen_load_split_stack_limit (r0));
29007 /* Always emit two insns here to calculate the requested stack,
29008 so that the linker can edit them when adjusting size for calling
29009 non-split-stack code. */
29010 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29011 alloc_lo = -allocate - alloc_hi;
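/* Worked example with a hypothetical frame size: for allocate ==
   0x12345, -allocate == -0x12345, so alloc_hi == (-0x12345 + 0x8000)
   & ~0xffff == -0x10000 and alloc_lo == -0x12345 - (-0x10000) ==
   -0x2345. Then r12 == r1 - 0x10000 - 0x2345 == r1 - 0x12345, with
   alloc_lo always fitting the signed 16-bit immediate of an addi. */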
29012 if (alloc_hi != 0)
29014 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29015 if (alloc_lo != 0)
29016 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29017 else
29018 emit_insn (gen_nop ());
29020 else
29022 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29023 emit_insn (gen_nop ());
29026 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29027 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29028 ok_label = gen_label_rtx ();
29029 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29030 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29031 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29032 pc_rtx);
29033 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29034 JUMP_LABEL (insn) = ok_label;
29035 /* Mark the jump as very likely to be taken. */
29036 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29038 lr = gen_rtx_REG (Pmode, LR_REGNO);
29039 insn = emit_move_insn (r0, lr);
29040 RTX_FRAME_RELATED_P (insn) = 1;
29041 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29042 RTX_FRAME_RELATED_P (insn) = 1;
29044 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29045 const0_rtx, const0_rtx));
29046 call_fusage = NULL_RTX;
29047 use_reg (&call_fusage, r12);
29048 /* Say the call uses r0, even though it doesn't, to stop regrename
29049 from twiddling with the insns saving lr, trashing args for cfun.
29050 The insns restoring lr are similarly protected by making
29051 split_stack_return use r0. */
29052 use_reg (&call_fusage, r0);
29053 add_function_usage_to (insn, call_fusage);
29054 /* Indicate that this function can't jump to non-local gotos. */
29055 make_reg_eh_region_note_nothrow_nononlocal (insn);
29056 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29057 insn = emit_move_insn (lr, r0);
29058 add_reg_note (insn, REG_CFA_RESTORE, lr);
29059 RTX_FRAME_RELATED_P (insn) = 1;
29060 emit_insn (gen_split_stack_return ());
29062 emit_label (ok_label);
29063 LABEL_NUSES (ok_label) = 1;
29066 /* Return the internal arg pointer used for function incoming
29067 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29068 to copy it to a pseudo in order for it to be preserved over calls
29069 and suchlike. We'd really like to use a pseudo here for the
29070 internal arg pointer but data-flow analysis is not prepared to
29071 accept pseudos as live at the beginning of a function. */
29073 static rtx
29074 rs6000_internal_arg_pointer (void)
29076 if (flag_split_stack
29077 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29078 == NULL))
29081 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29083 rtx pat;
29085 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29086 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29088 /* Put the pseudo initialization right after the note at the
29089 beginning of the function. */
29090 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29091 gen_rtx_REG (Pmode, 12));
29092 push_topmost_sequence ();
29093 emit_insn_after (pat, get_insns ());
29094 pop_topmost_sequence ();
29096 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29097 FIRST_PARM_OFFSET (current_function_decl));
29099 return virtual_incoming_args_rtx;
29102 /* We may have to tell the dataflow pass that the split stack prologue
29103 is initializing a register. */
29105 static void
29106 rs6000_live_on_entry (bitmap regs)
29108 if (flag_split_stack)
29109 bitmap_set_bit (regs, 12);
29112 /* Emit -fsplit-stack dynamic stack allocation space check. */
29114 void
29115 rs6000_split_stack_space_check (rtx size, rtx label)
29117 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29118 rtx limit = gen_reg_rtx (Pmode);
29119 rtx requested = gen_reg_rtx (Pmode);
29120 rtx cmp = gen_reg_rtx (CCUNSmode);
29121 rtx jump;
29123 emit_insn (gen_load_split_stack_limit (limit));
29124 if (CONST_INT_P (size))
29125 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29126 else
29128 size = force_reg (Pmode, size);
29129 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29131 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29132 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29133 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29134 gen_rtx_LABEL_REF (VOIDmode, label),
29135 pc_rtx);
29136 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29137 JUMP_LABEL (jump) = label;
29140 /* Implement TARGET_ASM_OUTPUT_MI_THUNK: output the assembler code for a thunk
29141 function, used to implement C++ virtual function calls with
29142 multiple inheritance. The thunk acts as a wrapper around a virtual
29143 function, adjusting the implicit object parameter before handing
29144 control off to the real function.
29146 First, emit code to add the integer DELTA to the location that
29147 contains the incoming first argument. Assume that this argument
29148 contains a pointer, and is the one used to pass the `this' pointer
29149 in C++. This is the incoming argument *before* the function
29150 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29151 values of all other incoming arguments.
29153 After the addition, emit code to jump to FUNCTION, which is a
29154 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29155 not touch the return address. Hence returning from FUNCTION will
29156 return to whoever called the current `thunk'.
29158 The effect must be as if FUNCTION had been called directly with the
29159 adjusted first argument. This macro is responsible for emitting
29160 all of the code for a thunk function; output_function_prologue()
29161 and output_function_epilogue() are not invoked.
29163 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29164 been extracted from it.) It might possibly be useful on some
29165 targets, but probably not.
29167 If this hook is not defined, the target-independent code in the
29168 C++ front end will generate a less efficient heavyweight thunk that
29169 calls FUNCTION instead of jumping to it. The generic approach does
29170 not support varargs. */
29172 static void
29173 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29174 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29175 tree function)
29177 rtx this_rtx, funexp;
29178 rtx_insn *insn;
29180 reload_completed = 1;
29181 epilogue_completed = 1;
29183 /* Mark the end of the (empty) prologue. */
29184 emit_note (NOTE_INSN_PROLOGUE_END);
29186 /* Find the "this" pointer. If the function returns a structure,
29187 the structure return pointer is in r3. */
29188 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29189 this_rtx = gen_rtx_REG (Pmode, 4);
29190 else
29191 this_rtx = gen_rtx_REG (Pmode, 3);
29193 /* Apply the constant offset, if required. */
29194 if (delta)
29195 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29197 /* Apply the offset from the vtable, if required. */
29198 if (vcall_offset)
29200 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29201 rtx tmp = gen_rtx_REG (Pmode, 12);
29203 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29204 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29206 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29207 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29209 else
29211 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29213 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29215 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
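/* Note on the split above (illustrative offsets): a vcall_offset of
   0x12345 fails the signed-16-bit test (0x12345 + 0x8000 >= 0x10000),
   so it is first added to TMP and the vtable slot loaded from
   (mem TMP); a small offset such as 0x100 is instead folded into the
   load address as (mem (plus TMP offset)). */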
29218 /* Generate a tail call to the target function. */
29219 if (!TREE_USED (function))
29221 assemble_external (function);
29222 TREE_USED (function) = 1;
29224 funexp = XEXP (DECL_RTL (function), 0);
29225 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29227 #if TARGET_MACHO
29228 if (MACHOPIC_INDIRECT)
29229 funexp = machopic_indirect_call_target (funexp);
29230 #endif
29232 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29233 generate sibcall RTL explicitly. */
29234 insn = emit_call_insn (
29235 gen_rtx_PARALLEL (VOIDmode,
29236 gen_rtvec (3,
29237 gen_rtx_CALL (VOIDmode,
29238 funexp, const0_rtx),
29239 gen_rtx_USE (VOIDmode, const0_rtx),
29240 simple_return_rtx)));
29241 SIBLING_CALL_P (insn) = 1;
29242 emit_barrier ();
29244 /* Run just enough of rest_of_compilation to get the insns emitted.
29245 There's not really enough bulk here to make other passes such as
29246 instruction scheduling worth while. Note that use_thunk calls
29247 assemble_start_function and assemble_end_function. */
29248 insn = get_insns ();
29249 shorten_branches (insn);
29250 final_start_function (insn, file, 1);
29251 final (insn, file, 1);
29252 final_end_function ();
29254 reload_completed = 0;
29255 epilogue_completed = 0;
29258 /* A quick summary of the various types of 'constant-pool tables'
29259 under PowerPC:
29261 Target Flags Name One table per
29262 AIX (none) AIX TOC object file
29263 AIX -mfull-toc AIX TOC object file
29264 AIX -mminimal-toc AIX minimal TOC translation unit
29265 SVR4/EABI (none) SVR4 SDATA object file
29266 SVR4/EABI -fpic SVR4 pic object file
29267 SVR4/EABI -fPIC SVR4 PIC translation unit
29268 SVR4/EABI -mrelocatable EABI TOC function
29269 SVR4/EABI -maix AIX TOC object file
29270 SVR4/EABI -maix -mminimal-toc
29271 AIX minimal TOC translation unit
29273 Name Reg. Set by entries contains:
29274 made by addrs? fp? sum?
29276 AIX TOC 2 crt0 as Y option option
29277 AIX minimal TOC 30 prolog gcc Y Y option
29278 SVR4 SDATA 13 crt0 gcc N Y N
29279 SVR4 pic 30 prolog ld Y not yet N
29280 SVR4 PIC 30 prolog gcc Y option option
29281 EABI TOC 30 prolog gcc Y option option
29285 /* Hash functions for the hash table. */
29287 static unsigned
29288 rs6000_hash_constant (rtx k)
29290 enum rtx_code code = GET_CODE (k);
29291 machine_mode mode = GET_MODE (k);
29292 unsigned result = (code << 3) ^ mode;
29293 const char *format;
29294 int flen, fidx;
29296 format = GET_RTX_FORMAT (code);
29297 flen = strlen (format);
29298 fidx = 0;
29300 switch (code)
29302 case LABEL_REF:
29303 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29305 case CONST_WIDE_INT:
29307 int i;
29308 flen = CONST_WIDE_INT_NUNITS (k);
29309 for (i = 0; i < flen; i++)
29310 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29311 return result;
29314 case CONST_DOUBLE:
29315 if (mode != VOIDmode)
29316 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29317 flen = 2;
29318 break;
29320 case CODE_LABEL:
29321 fidx = 3;
29322 break;
29324 default:
29325 break;
29328 for (; fidx < flen; fidx++)
29329 switch (format[fidx])
29331 case 's':
29333 unsigned i, len;
29334 const char *str = XSTR (k, fidx);
29335 len = strlen (str);
29336 result = result * 613 + len;
29337 for (i = 0; i < len; i++)
29338 result = result * 613 + (unsigned) str[i];
29339 break;
29341 case 'u':
29342 case 'e':
29343 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29344 break;
29345 case 'i':
29346 case 'n':
29347 result = result * 613 + (unsigned) XINT (k, fidx);
29348 break;
29349 case 'w':
29350 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29351 result = result * 613 + (unsigned) XWINT (k, fidx);
29352 else
29354 size_t i;
29355 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29356 result = result * 613 + (unsigned) (XWINT (k, fidx)
29357 >> CHAR_BIT * i);
29359 break;
29360 case '0':
29361 break;
29362 default:
29363 gcc_unreachable ();
29366 return result;
29369 hashval_t
29370 toc_hasher::hash (toc_hash_struct *thc)
29372 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29375 /* Compare H1 and H2 for equivalence. */
29377 bool
29378 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29380 rtx r1 = h1->key;
29381 rtx r2 = h2->key;
29383 if (h1->key_mode != h2->key_mode)
29384 return 0;
29386 return rtx_equal_p (r1, r2);
29389 /* These are the names given by the C++ front-end to vtables, and
29390 vtable-like objects. Ideally, this logic should not be here;
29391 instead, there should be some programmatic way of inquiring as
29392 to whether or not an object is a vtable. */
29394 #define VTABLE_NAME_P(NAME) \
29395 (strncmp ("_vt.", NAME, strlen ("_vt.")) == 0 \
29396 || strncmp ("_ZTV", NAME, strlen ("_ZTV")) == 0 \
29397 || strncmp ("_ZTT", NAME, strlen ("_ZTT")) == 0 \
29398 || strncmp ("_ZTI", NAME, strlen ("_ZTI")) == 0 \
29399 || strncmp ("_ZTC", NAME, strlen ("_ZTC")) == 0)
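/* For example (illustrative Itanium C++ ABI manglings, not from the
   original source): "_ZTV3Foo" (vtable for Foo), "_ZTT3Foo" (VTT for
   Foo), "_ZTI3Foo" (typeinfo for Foo) and "_ZTC..." (construction
   vtables) all satisfy VTABLE_NAME_P. */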
29401 #ifdef NO_DOLLAR_IN_LABEL
29402 /* Return a GGC-allocated character string translating dollar signs in
29403 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29405 const char *
29406 rs6000_xcoff_strip_dollar (const char *name)
29408 char *strip, *p;
29409 const char *q;
29410 size_t len;
29412 q = (const char *) strchr (name, '$');
29414 if (q == 0 || q == name)
29415 return name;
29417 len = strlen (name);
29418 strip = XALLOCAVEC (char, len + 1);
29419 strcpy (strip, name);
29420 p = strip + (q - name);
29421 while (p)
29423 *p = '_';
29424 p = strchr (p + 1, '$');
29427 return ggc_alloc_string (strip, len);
29429 #endif
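/* Usage sketch (hypothetical input): rs6000_xcoff_strip_dollar
   ("foo$bar$baz") leaves the input string untouched and returns a GGC
   copy "foo_bar_baz"; a name with no '$', or one whose first character
   is '$', is returned unchanged. */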
29431 void
29432 rs6000_output_symbol_ref (FILE *file, rtx x)
29434 const char *name = XSTR (x, 0);
29436 /* Currently C++ toc references to vtables can be emitted before it
29437 is decided whether the vtable is public or private. If this is
29438 the case, then the linker will eventually complain that there is
29439 a reference to an unknown section. Thus, for vtables only,
29440 we emit the TOC reference to reference the identifier and not the
29441 symbol. */
29442 if (VTABLE_NAME_P (name))
29444 RS6000_OUTPUT_BASENAME (file, name);
29446 else
29447 assemble_name (file, name);
29450 /* Output a TOC entry. We derive the entry name from what is being
29451 written. */
29453 void
29454 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29456 char buf[256];
29457 const char *name = buf;
29458 rtx base = x;
29459 HOST_WIDE_INT offset = 0;
29461 gcc_assert (!TARGET_NO_TOC);
29463 /* When the linker won't eliminate them, don't output duplicate
29464 TOC entries (this happens on AIX if there is any kind of TOC,
29465 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29466 CODE_LABELs. */
29467 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29469 struct toc_hash_struct *h;
29471 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29472 time because GGC is not initialized at that point. */
29473 if (toc_hash_table == NULL)
29474 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29476 h = ggc_alloc<toc_hash_struct> ();
29477 h->key = x;
29478 h->key_mode = mode;
29479 h->labelno = labelno;
29481 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29482 if (*found == NULL)
29483 *found = h;
29484 else /* This is indeed a duplicate.
29485 Set this label equal to that label. */
29487 fputs ("\t.set ", file);
29488 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29489 fprintf (file, "%d,", labelno);
29490 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29491 fprintf (file, "%d\n", ((*found)->labelno));
29493 #ifdef HAVE_AS_TLS
29494 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29495 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29496 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29498 fputs ("\t.set ", file);
29499 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29500 fprintf (file, "%d,", labelno);
29501 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29502 fprintf (file, "%d\n", ((*found)->labelno));
29504 #endif
29505 return;
29509 /* If we're going to put a double constant in the TOC, make sure it's
29510 aligned properly when strict alignment is on. */
29511 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29512 && STRICT_ALIGNMENT
29513 && GET_MODE_BITSIZE (mode) >= 64
29514 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29515 ASM_OUTPUT_ALIGN (file, 3);
29518 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29520 /* Handle FP constants specially. Note that if we have a minimal
29521 TOC, things we put here aren't actually in the TOC, so we can allow
29522 FP constants. */
29523 if (GET_CODE (x) == CONST_DOUBLE
29524 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29525 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29527 long k[4];
29529 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29530 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29531 else
29532 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29534 if (TARGET_64BIT)
29536 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29537 fputs (DOUBLE_INT_ASM_OP, file);
29538 else
29539 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29540 k[0] & 0xffffffff, k[1] & 0xffffffff,
29541 k[2] & 0xffffffff, k[3] & 0xffffffff);
29542 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29543 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29544 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29545 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29546 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29547 return;
29549 else
29551 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29552 fputs ("\t.long ", file);
29553 else
29554 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29555 k[0] & 0xffffffff, k[1] & 0xffffffff,
29556 k[2] & 0xffffffff, k[3] & 0xffffffff);
29557 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29558 k[0] & 0xffffffff, k[1] & 0xffffffff,
29559 k[2] & 0xffffffff, k[3] & 0xffffffff);
29560 return;
29563 else if (GET_CODE (x) == CONST_DOUBLE
29564 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29566 long k[2];
29568 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29569 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29570 else
29571 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29573 if (TARGET_64BIT)
29575 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29576 fputs (DOUBLE_INT_ASM_OP, file);
29577 else
29578 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29579 k[0] & 0xffffffff, k[1] & 0xffffffff);
29580 fprintf (file, "0x%lx%08lx\n",
29581 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29582 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29583 return;
29585 else
29587 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29588 fputs ("\t.long ", file);
29589 else
29590 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29591 k[0] & 0xffffffff, k[1] & 0xffffffff);
29592 fprintf (file, "0x%lx,0x%lx\n",
29593 k[0] & 0xffffffff, k[1] & 0xffffffff);
29594 return;
29597 else if (GET_CODE (x) == CONST_DOUBLE
29598 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29600 long l;
29602 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29603 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29604 else
29605 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29607 if (TARGET_64BIT)
29609 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29610 fputs (DOUBLE_INT_ASM_OP, file);
29611 else
29612 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29613 if (WORDS_BIG_ENDIAN)
29614 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29615 else
29616 fprintf (file, "0x%lx\n", l & 0xffffffff);
29617 return;
29619 else
29621 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29622 fputs ("\t.long ", file);
29623 else
29624 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29625 fprintf (file, "0x%lx\n", l & 0xffffffff);
29626 return;
29629 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29631 unsigned HOST_WIDE_INT low;
29632 HOST_WIDE_INT high;
29634 low = INTVAL (x) & 0xffffffff;
29635 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29637 /* TOC entries are always Pmode-sized, so when big-endian
29638 smaller integer constants in the TOC need to be padded.
29639 (This is still a win over putting the constants in
29640 a separate constant pool, because then we'd have
29641 to have both a TOC entry _and_ the actual constant.)
29643 For a 32-bit target, CONST_INT values are loaded and shifted
29644 entirely within `low' and can be stored in one TOC entry. */
29646 /* Constants wider than the pointer are not handled on 64-bit targets;
it would be easy to make this work, but it doesn't now. */
29647 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29649 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29651 low |= high << 32;
29652 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29653 high = (HOST_WIDE_INT) low >> 32;
29654 low &= 0xffffffff;
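/* Worked example (hypothetical constant): an SImode CONST_INT
   0x12345678 in a 64-bit big-endian TOC starts as low == 0x12345678,
   high == 0; the shift left-justifies it, giving high == 0x12345678
   and low == 0, so the doubleword emitted is 0x1234567800000000. */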
29657 if (TARGET_64BIT)
29659 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29660 fputs (DOUBLE_INT_ASM_OP, file);
29661 else
29662 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29663 (long) high & 0xffffffff, (long) low & 0xffffffff);
29664 fprintf (file, "0x%lx%08lx\n",
29665 (long) high & 0xffffffff, (long) low & 0xffffffff);
29666 return;
29668 else
29670 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29672 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29673 fputs ("\t.long ", file);
29674 else
29675 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29676 (long) high & 0xffffffff, (long) low & 0xffffffff);
29677 fprintf (file, "0x%lx,0x%lx\n",
29678 (long) high & 0xffffffff, (long) low & 0xffffffff);
29680 else
29682 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29683 fputs ("\t.long ", file);
29684 else
29685 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29686 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29688 return;
29692 if (GET_CODE (x) == CONST)
29694 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29695 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29697 base = XEXP (XEXP (x, 0), 0);
29698 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29701 switch (GET_CODE (base))
29703 case SYMBOL_REF:
29704 name = XSTR (base, 0);
29705 break;
29707 case LABEL_REF:
29708 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29709 CODE_LABEL_NUMBER (XEXP (base, 0)));
29710 break;
29712 case CODE_LABEL:
29713 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29714 break;
29716 default:
29717 gcc_unreachable ();
29720 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29721 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29722 else
29724 fputs ("\t.tc ", file);
29725 RS6000_OUTPUT_BASENAME (file, name);
29727 if (offset < 0)
29728 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29729 else if (offset)
29730 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29732 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29733 after other TOC symbols, reducing overflow of small TOC access
29734 to [TC] symbols. */
29735 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29736 ? "[TE]," : "[TC],", file);
29739 /* Currently C++ toc references to vtables can be emitted before it
29740 is decided whether the vtable is public or private. If this is
29741 the case, then the linker will eventually complain that there is
29742 a TOC reference to an unknown section. Thus, for vtables only,
29743 we emit the TOC reference to reference the symbol and not the
29744 section. */
29745 if (VTABLE_NAME_P (name))
29747 RS6000_OUTPUT_BASENAME (file, name);
29748 if (offset < 0)
29749 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29750 else if (offset > 0)
29751 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29753 else
29754 output_addr_const (file, x);
29756 #if HAVE_AS_TLS
29757 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29759 switch (SYMBOL_REF_TLS_MODEL (base))
29761 case 0:
29762 break;
29763 case TLS_MODEL_LOCAL_EXEC:
29764 fputs ("@le", file);
29765 break;
29766 case TLS_MODEL_INITIAL_EXEC:
29767 fputs ("@ie", file);
29768 break;
29769 /* Use global-dynamic for local-dynamic. */
29770 case TLS_MODEL_GLOBAL_DYNAMIC:
29771 case TLS_MODEL_LOCAL_DYNAMIC:
29772 putc ('\n', file);
29773 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29774 fputs ("\t.tc .", file);
29775 RS6000_OUTPUT_BASENAME (file, name);
29776 fputs ("[TC],", file);
29777 output_addr_const (file, x);
29778 fputs ("@m", file);
29779 break;
29780 default:
29781 gcc_unreachable ();
29784 #endif
29786 putc ('\n', file);
29789 /* Output an assembler pseudo-op to write an ASCII string of N characters
29790 starting at P to FILE.
29792 On the RS/6000, we have to do this using the .byte operation and
29793 write out special characters outside the quoted string.
29794 Also, the assembler is broken; very long strings are truncated,
29795 so we must artificially break them up early. */
29797 void
29798 output_ascii (FILE *file, const char *p, int n)
29800 char c;
29801 int i, count_string;
29802 const char *for_string = "\t.byte \"";
29803 const char *for_decimal = "\t.byte ";
29804 const char *to_close = NULL;
29806 count_string = 0;
29807 for (i = 0; i < n; i++)
29809 c = *p++;
29810 if (c >= ' ' && c < 0177)
29812 if (for_string)
29813 fputs (for_string, file);
29814 putc (c, file);
29816 /* Write two quotes to get one. */
29817 if (c == '"')
29819 putc (c, file);
29820 ++count_string;
29823 for_string = NULL;
29824 for_decimal = "\"\n\t.byte ";
29825 to_close = "\"\n";
29826 ++count_string;
29828 if (count_string >= 512)
29830 fputs (to_close, file);
29832 for_string = "\t.byte \"";
29833 for_decimal = "\t.byte ";
29834 to_close = NULL;
29835 count_string = 0;
29838 else
29840 if (for_decimal)
29841 fputs (for_decimal, file);
29842 fprintf (file, "%d", c);
29844 for_string = "\n\t.byte \"";
29845 for_decimal = ", ";
29846 to_close = "\n";
29847 count_string = 0;
29851 /* Now close the string if we have written one. Then end the line. */
29852 if (to_close)
29853 fputs (to_close, file);
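/* Illustration (hypothetical call): output_ascii (file, "Hi\n", 3)
   emits
	.byte "Hi"
	.byte 10
   keeping printable runs inside one quoted .byte string and spilling
   unprintable characters out as decimal operands. */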
29856 /* Generate a unique section name for FILENAME for a section type
29857 represented by SECTION_DESC. Output goes into BUF.
29859 SECTION_DESC can be any string, as long as it is different for each
29860 possible section type.
29862 We name the section in the same manner as xlc. The name begins with an
29863 underscore followed by the filename (after stripping any leading directory
29864 names) with the last period replaced by the string SECTION_DESC. If
29865 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29866 the name. */
29868 void
29869 rs6000_gen_section_name (char **buf, const char *filename,
29870 const char *section_desc)
29872 const char *q, *after_last_slash, *last_period = 0;
29873 char *p;
29874 int len;
29876 after_last_slash = filename;
29877 for (q = filename; *q; q++)
29879 if (*q == '/')
29880 after_last_slash = q + 1;
29881 else if (*q == '.')
29882 last_period = q;
29885 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29886 *buf = (char *) xmalloc (len);
29888 p = *buf;
29889 *p++ = '_';
29891 for (q = after_last_slash; *q; q++)
29893 if (q == last_period)
29895 strcpy (p, section_desc);
29896 p += strlen (section_desc);
29897 break;
29900 else if (ISALNUM (*q))
29901 *p++ = *q;
29904 if (last_period == 0)
29905 strcpy (p, section_desc);
29906 else
29907 *p = '\0';
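/* Example (hypothetical arguments): rs6000_gen_section_name (&buf,
   "dir/sub/foo.c", "bss_") strips the directories, drops non-alphanumeric
   characters, and replaces the final ".c" with the descriptor,
   producing "_foobss_". */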
29910 /* Emit profile function. */
29912 void
29913 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29915 /* Non-standard profiling for kernels, which just saves LR then calls
29916 _mcount without worrying about arg saves. The idea is to change
29917 the function prologue as little as possible as it isn't easy to
29918 account for arg save/restore code added just for _mcount. */
29919 if (TARGET_PROFILE_KERNEL)
29920 return;
29922 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29924 #ifndef NO_PROFILE_COUNTERS
29925 # define NO_PROFILE_COUNTERS 0
29926 #endif
29927 if (NO_PROFILE_COUNTERS)
29928 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29929 LCT_NORMAL, VOIDmode);
29930 else
29932 char buf[30];
29933 const char *label_name;
29934 rtx fun;
29936 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29937 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
29938 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
29940 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29941 LCT_NORMAL, VOIDmode, fun, Pmode);
29944 else if (DEFAULT_ABI == ABI_DARWIN)
29946 const char *mcount_name = RS6000_MCOUNT;
29947 int caller_addr_regno = LR_REGNO;
29949 /* Be conservative and always set this, at least for now. */
29950 crtl->uses_pic_offset_table = 1;
29952 #if TARGET_MACHO
29953 /* For PIC code, set up a stub and collect the caller's address
29954 from r0, which is where the prologue puts it. */
29955 if (MACHOPIC_INDIRECT
29956 && crtl->uses_pic_offset_table)
29957 caller_addr_regno = 0;
29958 #endif
29959 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
29960 LCT_NORMAL, VOIDmode,
29961 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
29965 /* Write function profiler code. */
29967 void
29968 output_function_profiler (FILE *file, int labelno)
29970 char buf[100];
29972 switch (DEFAULT_ABI)
29974 default:
29975 gcc_unreachable ();
29977 case ABI_V4:
29978 if (!TARGET_32BIT)
29980 warning (0, "no profiling of 64-bit code for this ABI");
29981 return;
29983 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29984 fprintf (file, "\tmflr %s\n", reg_names[0]);
29985 if (NO_PROFILE_COUNTERS)
29987 asm_fprintf (file, "\tstw %s,4(%s)\n",
29988 reg_names[0], reg_names[1]);
29990 else if (TARGET_SECURE_PLT && flag_pic)
29992 if (TARGET_LINK_STACK)
29994 char name[32];
29995 get_ppc476_thunk_name (name);
29996 asm_fprintf (file, "\tbl %s\n", name);
29998 else
29999 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30000 asm_fprintf (file, "\tstw %s,4(%s)\n",
30001 reg_names[0], reg_names[1]);
30002 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30003 asm_fprintf (file, "\taddis %s,%s,",
30004 reg_names[12], reg_names[12]);
30005 assemble_name (file, buf);
30006 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30007 assemble_name (file, buf);
30008 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30010 else if (flag_pic == 1)
30012 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30013 asm_fprintf (file, "\tstw %s,4(%s)\n",
30014 reg_names[0], reg_names[1]);
30015 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30016 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30017 assemble_name (file, buf);
30018 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30020 else if (flag_pic > 1)
30022 asm_fprintf (file, "\tstw %s,4(%s)\n",
30023 reg_names[0], reg_names[1]);
30024 /* Now, we need to get the address of the label. */
30025 if (TARGET_LINK_STACK)
30027 char name[32];
30028 get_ppc476_thunk_name (name);
30029 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30030 assemble_name (file, buf);
30031 fputs ("-.\n1:", file);
30032 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30033 asm_fprintf (file, "\taddi %s,%s,4\n",
30034 reg_names[11], reg_names[11]);
30036 else
30038 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30039 assemble_name (file, buf);
30040 fputs ("-.\n1:", file);
30041 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30043 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30044 reg_names[0], reg_names[11]);
30045 asm_fprintf (file, "\tadd %s,%s,%s\n",
30046 reg_names[0], reg_names[0], reg_names[11]);
30048 else
30050 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30051 assemble_name (file, buf);
30052 fputs ("@ha\n", file);
30053 asm_fprintf (file, "\tstw %s,4(%s)\n",
30054 reg_names[0], reg_names[1]);
30055 asm_fprintf (file, "\tla %s,", reg_names[0]);
30056 assemble_name (file, buf);
30057 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30060 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30061 fprintf (file, "\tbl %s%s\n",
30062 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30063 break;
30065 case ABI_AIX:
30066 case ABI_ELFv2:
30067 case ABI_DARWIN:
30068 /* Don't do anything, done in output_profile_hook (). */
30069 break;
30075 /* The following variable holds the last insn issued by the scheduler. */
30077 static rtx_insn *last_scheduled_insn;
30079 /* The following variable helps to balance issuing of load and
30080 store instructions. */
30082 static int load_store_pendulum;
30084 /* The following variable helps pair divide insns during scheduling. */
30085 static int divide_cnt;
30086 /* The following variable helps pair and alternate vector and vector load
30087 insns during scheduling. */
30088 static int vec_pairing;
30091 /* Power4 load update and store update instructions are cracked into a
30092 load or store and an integer insn which are executed in the same cycle.
30093 Branches have their own dispatch slot which does not count against the
30094 GCC issue rate, but it changes the program flow so there are no other
30095 instructions to issue in this cycle. */
30097 static int
30098 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30100 last_scheduled_insn = insn;
30101 if (GET_CODE (PATTERN (insn)) == USE
30102 || GET_CODE (PATTERN (insn)) == CLOBBER)
30104 cached_can_issue_more = more;
30105 return cached_can_issue_more;
30108 if (insn_terminates_group_p (insn, current_group))
30110 cached_can_issue_more = 0;
30111 return cached_can_issue_more;
30114 /* If INSN has no reservation (e.g. it was not recognized), do not
charge it an issue slot. */
30115 if (recog_memoized (insn) < 0)
30116 return more;
30118 if (rs6000_sched_groups)
30120 if (is_microcoded_insn (insn))
30121 cached_can_issue_more = 0;
30122 else if (is_cracked_insn (insn))
30123 cached_can_issue_more = more > 2 ? more - 2 : 0;
30124 else
30125 cached_can_issue_more = more - 1;
30127 return cached_can_issue_more;
30130 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
30131 return 0;
30133 cached_can_issue_more = more - 1;
30134 return cached_can_issue_more;
30137 static int
30138 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30140 int r = rs6000_variable_issue_1 (insn, more);
30141 if (verbose)
30142 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30143 return r;
30146 /* Adjust the cost of a scheduling dependency. Return the new cost of
30147 a dependency of kind DEP_TYPE between INSN and DEP_INSN; COST is the current cost. */
30149 static int
30150 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30151 unsigned int)
30153 enum attr_type attr_type;
30155 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30156 return cost;
30158 switch (dep_type)
30160 case REG_DEP_TRUE:
30162 /* Data dependency; DEP_INSN writes a register that INSN reads
30163 some cycles later. */
30165 /* Separate a load from a narrower, dependent store. */
30166 if ((rs6000_sched_groups || rs6000_cpu_attr == CPU_POWER9)
30167 && GET_CODE (PATTERN (insn)) == SET
30168 && GET_CODE (PATTERN (dep_insn)) == SET
30169 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30170 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30171 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30172 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30173 return cost + 14;
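/* Illustration of the adjustment above (hypothetical insns): a 4-byte
   load that depends on an earlier 2-byte store to overlapping memory
   cannot have the data forwarded and risks a costly load-hit-store
   flush, so the extra 14 cycles keep such pairs apart. */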
30175 attr_type = get_attr_type (insn);
30177 switch (attr_type)
30179 case TYPE_JMPREG:
30180 /* Tell the first scheduling pass about the latency between
30181 a mtctr and bctr (and mtlr and br/blr). The first
30182 scheduling pass will not know about this latency since
30183 the mtctr instruction, which has the latency associated
30184 to it, will be generated by reload. */
30185 return 4;
30186 case TYPE_BRANCH:
30187 /* Leave some extra cycles between a compare and its
30188 dependent branch, to inhibit expensive mispredicts. */
30189 if ((rs6000_cpu_attr == CPU_PPC603
30190 || rs6000_cpu_attr == CPU_PPC604
30191 || rs6000_cpu_attr == CPU_PPC604E
30192 || rs6000_cpu_attr == CPU_PPC620
30193 || rs6000_cpu_attr == CPU_PPC630
30194 || rs6000_cpu_attr == CPU_PPC750
30195 || rs6000_cpu_attr == CPU_PPC7400
30196 || rs6000_cpu_attr == CPU_PPC7450
30197 || rs6000_cpu_attr == CPU_PPCE5500
30198 || rs6000_cpu_attr == CPU_PPCE6500
30199 || rs6000_cpu_attr == CPU_POWER4
30200 || rs6000_cpu_attr == CPU_POWER5
30201 || rs6000_cpu_attr == CPU_POWER7
30202 || rs6000_cpu_attr == CPU_POWER8
30203 || rs6000_cpu_attr == CPU_POWER9
30204 || rs6000_cpu_attr == CPU_CELL)
30205 && recog_memoized (dep_insn)
30206 && (INSN_CODE (dep_insn) >= 0))
30208 switch (get_attr_type (dep_insn))
30210 case TYPE_CMP:
30211 case TYPE_FPCOMPARE:
30212 case TYPE_CR_LOGICAL:
30213 case TYPE_DELAYED_CR:
30214 return cost + 2;
30215 case TYPE_EXTS:
30216 case TYPE_MUL:
30217 if (get_attr_dot (dep_insn) == DOT_YES)
30218 return cost + 2;
30219 else
30220 break;
30221 case TYPE_SHIFT:
30222 if (get_attr_dot (dep_insn) == DOT_YES
30223 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30224 return cost + 2;
30225 else
30226 break;
30227 default:
30228 break;
30230 break;
30232 case TYPE_STORE:
30233 case TYPE_FPSTORE:
30234 if ((rs6000_cpu == PROCESSOR_POWER6)
30235 && recog_memoized (dep_insn)
30236 && (INSN_CODE (dep_insn) >= 0))
30239 if (GET_CODE (PATTERN (insn)) != SET)
30240 /* If this happens, we have to extend this to schedule
30241 optimally. Return default for now. */
30242 return cost;
30244 /* Adjust the cost for the case where the value written
30245 by a fixed point operation is used as the address
30246 gen value on a store. */
30247 switch (get_attr_type (dep_insn))
30249 case TYPE_LOAD:
30250 case TYPE_CNTLZ:
30252 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30253 return get_attr_sign_extend (dep_insn)
30254 == SIGN_EXTEND_YES ? 6 : 4;
30255 break;
30257 case TYPE_SHIFT:
30259 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30260 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30261 6 : 3;
30262 break;
30264 case TYPE_INTEGER:
30265 case TYPE_ADD:
30266 case TYPE_LOGICAL:
30267 case TYPE_EXTS:
30268 case TYPE_INSERT:
30270 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30271 return 3;
30272 break;
30274 case TYPE_STORE:
30275 case TYPE_FPLOAD:
30276 case TYPE_FPSTORE:
30278 if (get_attr_update (dep_insn) == UPDATE_YES
30279 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30280 return 3;
30281 break;
30283 case TYPE_MUL:
30285 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30286 return 17;
30287 break;
30289 case TYPE_DIV:
30291 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30292 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30293 break;
30295 default:
30296 break;
30299 break;
30301 case TYPE_LOAD:
30302 if ((rs6000_cpu == PROCESSOR_POWER6)
30303 && recog_memoized (dep_insn)
30304 && (INSN_CODE (dep_insn) >= 0))
30307 /* Adjust the cost for the case where the value written
30308 by a fixed point instruction is used within the address
30309 gen portion of a subsequent load(u)(x). */
30310 switch (get_attr_type (dep_insn))
30312 case TYPE_LOAD:
30313 case TYPE_CNTLZ:
30315 if (set_to_load_agen (dep_insn, insn))
30316 return get_attr_sign_extend (dep_insn)
30317 == SIGN_EXTEND_YES ? 6 : 4;
30318 break;
30320 case TYPE_SHIFT:
30322 if (set_to_load_agen (dep_insn, insn))
30323 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30324 6 : 3;
30325 break;
30327 case TYPE_INTEGER:
30328 case TYPE_ADD:
30329 case TYPE_LOGICAL:
30330 case TYPE_EXTS:
30331 case TYPE_INSERT:
30333 if (set_to_load_agen (dep_insn, insn))
30334 return 3;
30335 break;
30337 case TYPE_STORE:
30338 case TYPE_FPLOAD:
30339 case TYPE_FPSTORE:
30341 if (get_attr_update (dep_insn) == UPDATE_YES
30342 && set_to_load_agen (dep_insn, insn))
30343 return 3;
30344 break;
30346 case TYPE_MUL:
30348 if (set_to_load_agen (dep_insn, insn))
30349 return 17;
30350 break;
30352 case TYPE_DIV:
30354 if (set_to_load_agen (dep_insn, insn))
30355 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30356 break;
30358 default:
30359 break;
30362 break;
30364 case TYPE_FPLOAD:
30365 if ((rs6000_cpu == PROCESSOR_POWER6)
30366 && get_attr_update (insn) == UPDATE_NO
30367 && recog_memoized (dep_insn)
30368 && (INSN_CODE (dep_insn) >= 0)
30369 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30370 return 2;
30372 default:
30373 break;
30376 /* Fall out to return default cost. */
30378 break;
30380 case REG_DEP_OUTPUT:
30381 /* Output dependency; DEP_INSN writes a register that INSN writes some
30382 cycles later. */
30383 if ((rs6000_cpu == PROCESSOR_POWER6)
30384 && recog_memoized (dep_insn)
30385 && (INSN_CODE (dep_insn) >= 0))
30387 attr_type = get_attr_type (insn);
30389 switch (attr_type)
30391 case TYPE_FP:
30392 case TYPE_FPSIMPLE:
30393 if (get_attr_type (dep_insn) == TYPE_FP
30394 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30395 return 1;
30396 break;
30397 case TYPE_FPLOAD:
30398 if (get_attr_update (insn) == UPDATE_NO
30399 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30400 return 2;
30401 break;
30402 default:
30403 break;
30406 /* Fall through, no cost for output dependency. */
30407 /* FALLTHRU */
30409 case REG_DEP_ANTI:
30410 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30411 cycles later. */
30412 return 0;
30414 default:
30415 gcc_unreachable ();
30418 return cost;
30421 /* Debug version of rs6000_adjust_cost. */
30423 static int
30424 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30425 int cost, unsigned int dw)
30427 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30429 if (ret != cost)
30431 const char *dep;
30433 switch (dep_type)
30435 default: dep = "unknown dependency"; break;
30436 case REG_DEP_TRUE: dep = "data dependency"; break;
30437 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30438 case REG_DEP_ANTI: dep = "anti dependency"; break;
30441 fprintf (stderr,
30442 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30443 "%s, insn:\n", ret, cost, dep);
30445 debug_rtx (insn);
30448 return ret;
30451 /* Return true if INSN is microcoded, false otherwise. */
30454 static bool
30455 is_microcoded_insn (rtx_insn *insn)
30457 if (!insn || !NONDEBUG_INSN_P (insn)
30458 || GET_CODE (PATTERN (insn)) == USE
30459 || GET_CODE (PATTERN (insn)) == CLOBBER)
30460 return false;
30462 if (rs6000_cpu_attr == CPU_CELL)
30463 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30465 if (rs6000_sched_groups
30466 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30468 enum attr_type type = get_attr_type (insn);
30469 if ((type == TYPE_LOAD
30470 && get_attr_update (insn) == UPDATE_YES
30471 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30472 || ((type == TYPE_LOAD || type == TYPE_STORE)
30473 && get_attr_update (insn) == UPDATE_YES
30474 && get_attr_indexed (insn) == INDEXED_YES)
30475 || type == TYPE_MFCR)
30476 return true;
30479 return false;
30482 /* The function returns true if INSN is cracked into 2 instructions
30483 by the processor (and therefore occupies 2 issue slots). */
30485 static bool
30486 is_cracked_insn (rtx_insn *insn)
30488 if (!insn || !NONDEBUG_INSN_P (insn)
30489 || GET_CODE (PATTERN (insn)) == USE
30490 || GET_CODE (PATTERN (insn)) == CLOBBER)
30491 return false;
30493 if (rs6000_sched_groups
30494 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30496 enum attr_type type = get_attr_type (insn);
30497 if ((type == TYPE_LOAD
30498 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30499 && get_attr_update (insn) == UPDATE_NO)
30500 || (type == TYPE_LOAD
30501 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30502 && get_attr_update (insn) == UPDATE_YES
30503 && get_attr_indexed (insn) == INDEXED_NO)
30504 || (type == TYPE_STORE
30505 && get_attr_update (insn) == UPDATE_YES
30506 && get_attr_indexed (insn) == INDEXED_NO)
30507 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30508 && get_attr_update (insn) == UPDATE_YES)
30509 || type == TYPE_DELAYED_CR
30510 || (type == TYPE_EXTS
30511 && get_attr_dot (insn) == DOT_YES)
30512 || (type == TYPE_SHIFT
30513 && get_attr_dot (insn) == DOT_YES
30514 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30515 || (type == TYPE_MUL
30516 && get_attr_dot (insn) == DOT_YES)
30517 || type == TYPE_DIV
30518 || (type == TYPE_INSERT
30519 && get_attr_size (insn) == SIZE_32))
30520 return true;
30523 return false;
30526 /* The function returns true if INSN can be issued only from
30527 the branch slot. */
30529 static bool
30530 is_branch_slot_insn (rtx_insn *insn)
30532 if (!insn || !NONDEBUG_INSN_P (insn)
30533 || GET_CODE (PATTERN (insn)) == USE
30534 || GET_CODE (PATTERN (insn)) == CLOBBER)
30535 return false;
30537 if (rs6000_sched_groups)
30539 enum attr_type type = get_attr_type (insn);
30540 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30541 return true;
30542 return false;
30545 return false;
30548 /* Return true if OUT_INSN sets a value that is used in the
30549 address generation computation of IN_INSN. */
30550 static bool
30551 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30553 rtx out_set, in_set;
30555 /* For performance reasons, only handle the simple case where
30556 both insns are a single_set. */
30557 out_set = single_set (out_insn);
30558 if (out_set)
30560 in_set = single_set (in_insn);
30561 if (in_set)
30562 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30565 return false;
30568 /* Try to determine base/offset/size parts of the given MEM.
30569 Return true if successful, false if the values could not all
30570 be determined.
30572 This function only looks for REG or REG+CONST address forms.
30573 REG+REG address form will return false. */
30575 static bool
30576 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30577 HOST_WIDE_INT *size)
30579 rtx addr_rtx;
30580 if (MEM_SIZE_KNOWN_P (mem))
30581 *size = MEM_SIZE (mem);
30582 else
30583 return false;
30585 addr_rtx = (XEXP (mem, 0));
30586 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30587 addr_rtx = XEXP (addr_rtx, 1);
30589 *offset = 0;
30590 while (GET_CODE (addr_rtx) == PLUS
30591 && CONST_INT_P (XEXP (addr_rtx, 1)))
30593 *offset += INTVAL (XEXP (addr_rtx, 1));
30594 addr_rtx = XEXP (addr_rtx, 0);
30596 if (!REG_P (addr_rtx))
30597 return false;
30599 *base = addr_rtx;
30600 return true;
30603 /* Return true if the target storage location of MEM1 is adjacent
30604 to the target storage location of MEM2. */
30607 static bool
30608 adjacent_mem_locations (rtx mem1, rtx mem2)
30610 rtx reg1, reg2;
30611 HOST_WIDE_INT off1, size1, off2, size2;
30613 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30614 && get_memref_parts (mem2, &reg2, &off2, &size2))
30615 return ((REGNO (reg1) == REGNO (reg2))
30616 && ((off1 + size1 == off2)
30617 || (off2 + size2 == off1)));
30619 return false;
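/* For instance (hypothetical operands): a 4-byte MEM at r3+4 and a
   4-byte MEM at r3+8 are adjacent (4 + 4 == 8), while a 4-byte MEM
   at r3+4 and one at r3+12 are not. */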
30622 /* This function returns true if it can be determined that the two MEM
30623 locations overlap by at least 1 byte based on base reg/offset/size. */
30625 static bool
30626 mem_locations_overlap (rtx mem1, rtx mem2)
30628 rtx reg1, reg2;
30629 HOST_WIDE_INT off1, size1, off2, size2;
30631 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30632 && get_memref_parts (mem2, &reg2, &off2, &size2))
30633 return ((REGNO (reg1) == REGNO (reg2))
30634 && (((off1 <= off2) && (off1 + size1 > off2))
30635 || ((off2 <= off1) && (off2 + size2 > off1))));
30637 return false;
30640 /* Implement TARGET_SCHED_ADJUST_PRIORITY, which updates the integer
30641 scheduling priority INSN_PRIORITY (INSN): return a higher value to
30642 schedule INSN earlier, a lower value to schedule it later. */
30646 static int
30647 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30649 rtx load_mem, str_mem;
30650 /* On machines (like the 750) which have asymmetric integer units,
30651 where one integer unit can do multiply and divides and the other
30652 can't, reduce the priority of multiply/divide so it is scheduled
30653 before other integer operations. */
30655 #if 0
30656 if (! INSN_P (insn))
30657 return priority;
30659 if (GET_CODE (PATTERN (insn)) == USE)
30660 return priority;
30662 switch (rs6000_cpu_attr) {
30663 case CPU_PPC750:
30664 switch (get_attr_type (insn))
30666 default:
30667 break;
30669 case TYPE_MUL:
30670 case TYPE_DIV:
30671 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30672 priority, priority);
30673 if (priority >= 0 && priority < 0x01000000)
30674 priority >>= 3;
30675 break;
30678 #endif
30680 if (insn_must_be_first_in_group (insn)
30681 && reload_completed
30682 && current_sched_info->sched_max_insns_priority
30683 && rs6000_sched_restricted_insns_priority)
30686 /* Prioritize insns that can be dispatched only in the first
30687 dispatch slot. */
30688 if (rs6000_sched_restricted_insns_priority == 1)
30689 /* Attach highest priority to insn. This means that in
30690 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30691 precede 'priority' (critical path) considerations. */
30692 return current_sched_info->sched_max_insns_priority;
30693 else if (rs6000_sched_restricted_insns_priority == 2)
30694 /* Increase priority of insn by a minimal amount. This means that in
30695 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30696 considerations precede dispatch-slot restriction considerations. */
30697 return (priority + 1);
30700 if (rs6000_cpu == PROCESSOR_POWER6
30701 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30702 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30703 /* Attach highest priority to insn if the scheduler has just issued two
30704 stores and this instruction is a load, or two loads and this instruction
30705 is a store. Power6 wants loads and stores scheduled alternately
30706 when possible. */
30707 return current_sched_info->sched_max_insns_priority;
30709 return priority;
30712 /* Return true if the instruction is nonpipelined on the Cell. */
30713 static bool
30714 is_nonpipeline_insn (rtx_insn *insn)
30716 enum attr_type type;
30717 if (!insn || !NONDEBUG_INSN_P (insn)
30718 || GET_CODE (PATTERN (insn)) == USE
30719 || GET_CODE (PATTERN (insn)) == CLOBBER)
30720 return false;
30722 type = get_attr_type (insn);
30723 if (type == TYPE_MUL
30724 || type == TYPE_DIV
30725 || type == TYPE_SDIV
30726 || type == TYPE_DDIV
30727 || type == TYPE_SSQRT
30728 || type == TYPE_DSQRT
30729 || type == TYPE_MFCR
30730 || type == TYPE_MFCRF
30731 || type == TYPE_MFJMPR)
30733 return true;
30735 return false;
30739 /* Return how many instructions the machine can issue per cycle. */
30741 static int
30742 rs6000_issue_rate (void)
30744 /* Unless scheduling for register pressure, use issue rate of 1 for
30745 first scheduling pass to decrease degradation. */
30746 if (!reload_completed && !flag_sched_pressure)
30747 return 1;
30749 switch (rs6000_cpu_attr) {
30750 case CPU_RS64A:
30751 case CPU_PPC601: /* ? */
30752 case CPU_PPC7450:
30753 return 3;
30754 case CPU_PPC440:
30755 case CPU_PPC603:
30756 case CPU_PPC750:
30757 case CPU_PPC7400:
30758 case CPU_PPC8540:
30759 case CPU_PPC8548:
30760 case CPU_CELL:
30761 case CPU_PPCE300C2:
30762 case CPU_PPCE300C3:
30763 case CPU_PPCE500MC:
30764 case CPU_PPCE500MC64:
30765 case CPU_PPCE5500:
30766 case CPU_PPCE6500:
30767 case CPU_TITAN:
30768 return 2;
30769 case CPU_PPC476:
30770 case CPU_PPC604:
30771 case CPU_PPC604E:
30772 case CPU_PPC620:
30773 case CPU_PPC630:
30774 return 4;
30775 case CPU_POWER4:
30776 case CPU_POWER5:
30777 case CPU_POWER6:
30778 case CPU_POWER7:
30779 return 5;
30780 case CPU_POWER8:
30781 return 7;
30782 case CPU_POWER9:
30783 return 6;
30784 default:
30785 return 1;
30789 /* Return how many instructions to look ahead for better insn
30790 scheduling. */
30792 static int
30793 rs6000_use_sched_lookahead (void)
30795 switch (rs6000_cpu_attr)
30797 case CPU_PPC8540:
30798 case CPU_PPC8548:
30799 return 4;
30801 case CPU_CELL:
30802 return (reload_completed ? 8 : 0);
30804 default:
30805 return 0;
30809 /* We are choosing an insn from the ready queue. Return zero if INSN can be
30810 chosen. */
30811 static int
30812 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30814 if (ready_index == 0)
30815 return 0;
30817 if (rs6000_cpu_attr != CPU_CELL)
30818 return 0;
30820 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30822 if (!reload_completed
30823 || is_nonpipeline_insn (insn)
30824 || is_microcoded_insn (insn))
30825 return 1;
30827 return 0;
30830 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30831 and return true. */
30833 static bool
30834 find_mem_ref (rtx pat, rtx *mem_ref)
30836 const char * fmt;
30837 int i, j;
30839 /* stack_tie does not produce any real memory traffic. */
30840 if (tie_operand (pat, VOIDmode))
30841 return false;
30843 if (GET_CODE (pat) == MEM)
30845 *mem_ref = pat;
30846 return true;
30849 /* Recursively process the pattern. */
30850 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30852 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30854 if (fmt[i] == 'e')
30856 if (find_mem_ref (XEXP (pat, i), mem_ref))
30857 return true;
30859 else if (fmt[i] == 'E')
30860 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30862 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30863 return true;
30867 return false;
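/* Illustration (hypothetical pattern): called on a SET_SRC such as
   (plus:SI (mem:SI (reg:SI 4)) (const_int 1)), the recursion walks
   the 'e' operands, finds the inner MEM, stores it in *MEM_REF and
   returns true. */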
30870 /* Determine if PAT is a PATTERN of a load insn. */
30872 static bool
30873 is_load_insn1 (rtx pat, rtx *load_mem)
30875 if (pat == NULL_RTX)
30876 return false;
30878 if (GET_CODE (pat) == SET)
30879 return find_mem_ref (SET_SRC (pat), load_mem);
30881 if (GET_CODE (pat) == PARALLEL)
30883 int i;
30885 for (i = 0; i < XVECLEN (pat, 0); i++)
30886 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30887 return true;
30890 return false;
30893 /* Determine if INSN loads from memory. */
30895 static bool
30896 is_load_insn (rtx insn, rtx *load_mem)
30898 if (!insn || !INSN_P (insn))
30899 return false;
30901 if (CALL_P (insn))
30902 return false;
30904 return is_load_insn1 (PATTERN (insn), load_mem);
30907 /* Determine if PAT is a PATTERN of a store insn. */
30909 static bool
30910 is_store_insn1 (rtx pat, rtx *str_mem)
30912 if (pat == NULL_RTX)
30913 return false;
30915 if (GET_CODE (pat) == SET)
30916 return find_mem_ref (SET_DEST (pat), str_mem);
30918 if (GET_CODE (pat) == PARALLEL)
30920 int i;
30922 for (i = 0; i < XVECLEN (pat, 0); i++)
30923 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30924 return true;
30927 return false;
30930 /* Determine if INSN stores to memory. */
30932 static bool
30933 is_store_insn (rtx insn, rtx *str_mem)
30935 if (!insn || !INSN_P (insn))
30936 return false;
30938 return is_store_insn1 (PATTERN (insn), str_mem);
30941 /* Return whether TYPE is a Power9 pairable vector instruction type. */
30943 static bool
30944 is_power9_pairable_vec_type (enum attr_type type)
30946 switch (type)
30948 case TYPE_VECSIMPLE:
30949 case TYPE_VECCOMPLEX:
30950 case TYPE_VECDIV:
30951 case TYPE_VECCMP:
30952 case TYPE_VECPERM:
30953 case TYPE_VECFLOAT:
30954 case TYPE_VECFDIV:
30955 case TYPE_VECDOUBLE:
30956 return true;
30957 default:
30958 break;
30960 return false;
30963 /* Returns whether the dependence between INSN and NEXT is considered
30964 costly by the given target. */
30966 static bool
30967 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
30969 rtx insn;
30970 rtx next;
30971 rtx load_mem, str_mem;
30973 /* If the flag is not enabled, no dependence is considered costly;
30974 allow all dependent insns in the same group.
30975 This is the most aggressive option. */
30976 if (rs6000_sched_costly_dep == no_dep_costly)
30977 return false;
30979 /* If the flag is set to 1, a dependence is always considered costly;
30980 do not allow dependent instructions in the same group.
30981 This is the most conservative option. */
30982 if (rs6000_sched_costly_dep == all_deps_costly)
30983 return true;
30985 insn = DEP_PRO (dep);
30986 next = DEP_CON (dep);
30988 if (rs6000_sched_costly_dep == store_to_load_dep_costly
30989 && is_load_insn (next, &load_mem)
30990 && is_store_insn (insn, &str_mem))
30991 /* Prevent load after store in the same group. */
30992 return true;
30994 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
30995 && is_load_insn (next, &load_mem)
30996 && is_store_insn (insn, &str_mem)
30997 && DEP_TYPE (dep) == REG_DEP_TRUE
30998 && mem_locations_overlap(str_mem, load_mem))
30999 /* Prevent load after store in the same group if it is a true
31000 dependence. */
31001 return true;
31003 /* The flag is set to X; dependences with latency >= X are considered costly,
31004 and will not be scheduled in the same group. */
31005 if (rs6000_sched_costly_dep <= max_dep_latency
31006 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31007 return true;
31009 return false;
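/* Worked example of the latency threshold above (hypothetical numbers):
   with -msched-costly-dep=3, a dependence of cost 4 at distance 1 gives
   4 - 1 >= 3, so it is costly and the insns stay in separate groups;
   at distance 2 it gives 4 - 2 < 3 and the pair may share a group.  */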
31012 /* Return the next insn after INSN that is found before TAIL is reached,
31013 skipping any "non-active" insns - insns that will not actually occupy
31014 an issue slot. Return NULL_RTX if such an insn is not found. */
31016 static rtx_insn *
31017 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31019 if (insn == NULL_RTX || insn == tail)
31020 return NULL;
31022 while (1)
31024 insn = NEXT_INSN (insn);
31025 if (insn == NULL_RTX || insn == tail)
31026 return NULL;
31028 if (CALL_P (insn)
31029 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31030 || (NONJUMP_INSN_P (insn)
31031 && GET_CODE (PATTERN (insn)) != USE
31032 && GET_CODE (PATTERN (insn)) != CLOBBER
31033 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31034 break;
31036 return insn;
31039 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31041 static int
31042 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31044 int pos;
31045 int i;
31046 rtx_insn *tmp;
31047 enum attr_type type, type2;
31049 type = get_attr_type (last_scheduled_insn);
31051 /* Try to issue fixed point divides back-to-back in pairs so they will be
31052 routed to separate execution units and execute in parallel. */
31053 if (type == TYPE_DIV && divide_cnt == 0)
31055 /* First divide has been scheduled. */
31056 divide_cnt = 1;
31058 /* Scan the ready list looking for another divide, if found move it
31059 to the end of the list so it is chosen next. */
31060 pos = lastpos;
31061 while (pos >= 0)
31063 if (recog_memoized (ready[pos]) >= 0
31064 && get_attr_type (ready[pos]) == TYPE_DIV)
31066 tmp = ready[pos];
31067 for (i = pos; i < lastpos; i++)
31068 ready[i] = ready[i + 1];
31069 ready[lastpos] = tmp;
31070 break;
31072 pos--;
31075 else
31077 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31078 divide_cnt = 0;
31080 /* The best dispatch throughput for vector and vector load insns can be
31081 achieved by interleaving a vector and vector load such that they'll
31082 dispatch to the same superslice. If this pairing cannot be achieved
31083 then it is best to pair vector insns together and vector load insns
31084 together.
31086 To aid in this pairing, vec_pairing maintains the current state with
31087 the following values:
31089 0 : Initial state, no vecload/vector pairing has been started.
31091 1 : A vecload or vector insn has been issued and a candidate for
31092 pairing has been found and moved to the end of the ready
31093 list. */
31094 if (type == TYPE_VECLOAD)
31096 /* Issued a vecload. */
31097 if (vec_pairing == 0)
31099 int vecload_pos = -1;
31100 /* We issued a single vecload, look for a vector insn to pair it
31101 with. If one isn't found, try to pair another vecload. */
31102 pos = lastpos;
31103 while (pos >= 0)
31105 if (recog_memoized (ready[pos]) >= 0)
31107 type2 = get_attr_type (ready[pos]);
31108 if (is_power9_pairable_vec_type (type2))
31110 /* Found a vector insn to pair with, move it to the
31111 end of the ready list so it is scheduled next. */
31112 tmp = ready[pos];
31113 for (i = pos; i < lastpos; i++)
31114 ready[i] = ready[i + 1];
31115 ready[lastpos] = tmp;
31116 vec_pairing = 1;
31117 return cached_can_issue_more;
31119 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31120 /* Remember position of first vecload seen. */
31121 vecload_pos = pos;
31123 pos--;
31125 if (vecload_pos >= 0)
31127 /* Didn't find a vector to pair with but did find a vecload,
31128 move it to the end of the ready list. */
31129 tmp = ready[vecload_pos];
31130 for (i = vecload_pos; i < lastpos; i++)
31131 ready[i] = ready[i + 1];
31132 ready[lastpos] = tmp;
31133 vec_pairing = 1;
31134 return cached_can_issue_more;
31138 else if (is_power9_pairable_vec_type (type))
31140 /* Issued a vector operation. */
31141 if (vec_pairing == 0)
31143 int vec_pos = -1;
31144 /* We issued a single vector insn, look for a vecload to pair it
31145 with. If one isn't found, try to pair another vector. */
31146 pos = lastpos;
31147 while (pos >= 0)
31149 if (recog_memoized (ready[pos]) >= 0)
31151 type2 = get_attr_type (ready[pos]);
31152 if (type2 == TYPE_VECLOAD)
31154 /* Found a vecload insn to pair with, move it to the
31155 end of the ready list so it is scheduled next. */
31156 tmp = ready[pos];
31157 for (i = pos; i < lastpos; i++)
31158 ready[i] = ready[i + 1];
31159 ready[lastpos] = tmp;
31160 vec_pairing = 1;
31161 return cached_can_issue_more;
31163 else if (is_power9_pairable_vec_type (type2)
31164 && vec_pos == -1)
31165 /* Remember position of first vector insn seen. */
31166 vec_pos = pos;
31168 pos--;
31170 if (vec_pos >= 0)
31172 /* Didn't find a vecload to pair with but did find a vector
31173 insn, move it to the end of the ready list. */
31174 tmp = ready[vec_pos];
31175 for (i = vec_pos; i < lastpos; i++)
31176 ready[i] = ready[i + 1];
31177 ready[lastpos] = tmp;
31178 vec_pairing = 1;
31179 return cached_can_issue_more;
31184 /* We've either finished a vec/vecload pair, couldn't find an insn to
31185 continue the current pair, or the last insn had nothing to do
31186 with pairing. In any case, reset the state. */
31187 vec_pairing = 0;
31190 return cached_can_issue_more;
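/* Illustrative sketch (standalone C, not GCC code) of the move-to-end
   idiom repeated above.  The scheduler issues from the end of the ready
   array, so rotating the chosen insn from POS to LASTPOS makes it the
   next one picked: */

#include <stdio.h>

static void
demo_rotate_to_end (int *ready, int pos, int lastpos)
{
  int tmp = ready[pos];
  for (int i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];
  ready[lastpos] = tmp;
}

int
main (void)
{
  int ready[5] = { 10, 11, 12, 13, 14 };
  demo_rotate_to_end (ready, 1, 4);
  for (int i = 0; i < 5; i++)
    printf ("%d ", ready[i]);       /* prints: 10 12 13 14 11 */
  return 0;
}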
31193 /* We are about to begin issuing insns for this clock cycle. */
31195 static int
31196 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31197 rtx_insn **ready ATTRIBUTE_UNUSED,
31198 int *pn_ready ATTRIBUTE_UNUSED,
31199 int clock_var ATTRIBUTE_UNUSED)
31201 int n_ready = *pn_ready;
31203 if (sched_verbose)
31204 fprintf (dump, "// rs6000_sched_reorder :\n");
31206 /* Reorder the ready list, if the second to last ready insn
31207 is a nonpipeline insn. */
31208 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
31210 if (is_nonpipeline_insn (ready[n_ready - 1])
31211 && (recog_memoized (ready[n_ready - 2]) > 0))
31212 /* Simply swap first two insns. */
31213 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31216 if (rs6000_cpu == PROCESSOR_POWER6)
31217 load_store_pendulum = 0;
31219 return rs6000_issue_rate ();
31222 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31224 static int
31225 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31226 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31228 if (sched_verbose)
31229 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31231 /* For Power6, we need to handle some special cases to try and keep the
31232 store queue from overflowing and triggering expensive flushes.
31234 This code monitors how load and store instructions are being issued
31235 and skews the ready list one way or the other to increase the likelihood
31236 that a desired instruction is issued at the proper time.
31238 A couple of things are done. First, we maintain a "load_store_pendulum"
31239 to track the current state of load/store issue.
31241 - If the pendulum is at zero, then no loads or stores have been
31242 issued in the current cycle so we do nothing.
31244 - If the pendulum is 1, then a single load has been issued in this
31245 cycle and we attempt to locate another load in the ready list to
31246 issue with it.
31248 - If the pendulum is -2, then two stores have already been
31249 issued in this cycle, so we increase the priority of the first load
31250 in the ready list to increase its likelihood of being chosen first
31251 in the next cycle.
31253 - If the pendulum is -1, then a single store has been issued in this
31254 cycle and we attempt to locate another store in the ready list to
31255 issue with it, preferring a store to an adjacent memory location to
31256 facilitate store pairing in the store queue.
31258 - If the pendulum is 2, then two loads have already been
31259 issued in this cycle, so we increase the priority of the first store
31260 in the ready list to increase its likelihood of being chosen first
31261 in the next cycle.
31263 - If the pendulum < -2 or > 2, then do nothing.
31265 Note: This code covers the most common scenarios. There exist non-
31266 load/store instructions which make use of the LSU and which
31267 would need to be accounted for to strictly model the behavior
31268 of the machine. Those instructions are currently unaccounted
31269 for to help minimize compile time overhead of this code.
31271 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
31273 int pos;
31274 int i;
31275 rtx_insn *tmp;
31276 rtx load_mem, str_mem;
31278 if (is_store_insn (last_scheduled_insn, &str_mem))
31279 /* Issuing a store, swing the load_store_pendulum to the left */
31280 load_store_pendulum--;
31281 else if (is_load_insn (last_scheduled_insn, &load_mem))
31282 /* Issuing a load, swing the load_store_pendulum to the right */
31283 load_store_pendulum++;
31284 else
31285 return cached_can_issue_more;
31287 /* If the pendulum is balanced, or there is only one instruction on
31288 the ready list, then all is well, so return. */
31289 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31290 return cached_can_issue_more;
31292 if (load_store_pendulum == 1)
31294 /* A load has been issued in this cycle. Scan the ready list
31295 for another load to issue with it */
31296 pos = *pn_ready-1;
31298 while (pos >= 0)
31300 if (is_load_insn (ready[pos], &load_mem))
31302 /* Found a load. Move it to the head of the ready list,
31303 and adjust its priority so that it is more likely to
31304 stay there */
31305 tmp = ready[pos];
31306 for (i=pos; i<*pn_ready-1; i++)
31307 ready[i] = ready[i + 1];
31308 ready[*pn_ready-1] = tmp;
31310 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31311 INSN_PRIORITY (tmp)++;
31312 break;
31314 pos--;
31317 else if (load_store_pendulum == -2)
31319 /* Two stores have been issued in this cycle. Increase the
31320 priority of the first load in the ready list to favor it for
31321 issuing in the next cycle. */
31322 pos = *pn_ready-1;
31324 while (pos >= 0)
31326 if (is_load_insn (ready[pos], &load_mem)
31327 && !sel_sched_p ()
31328 && INSN_PRIORITY_KNOWN (ready[pos]))
31330 INSN_PRIORITY (ready[pos])++;
31332 /* Adjust the pendulum to account for the fact that a load
31333 was found and increased in priority. This is to prevent
31334 increasing the priority of multiple loads */
31335 load_store_pendulum--;
31337 break;
31339 pos--;
31342 else if (load_store_pendulum == -1)
31344 /* A store has been issued in this cycle. Scan the ready list for
31345 another store to issue with it, preferring a store to an adjacent
31346 memory location */
31347 int first_store_pos = -1;
31349 pos = *pn_ready-1;
31351 while (pos >= 0)
31353 if (is_store_insn (ready[pos], &str_mem))
31355 rtx str_mem2;
31356 /* Maintain the index of the first store found on the
31357 list */
31358 if (first_store_pos == -1)
31359 first_store_pos = pos;
31361 if (is_store_insn (last_scheduled_insn, &str_mem2)
31362 && adjacent_mem_locations (str_mem, str_mem2))
31364 /* Found an adjacent store. Move it to the head of the
31365 ready list, and adjust its priority so that it is
31366 more likely to stay there */
31367 tmp = ready[pos];
31368 for (i=pos; i<*pn_ready-1; i++)
31369 ready[i] = ready[i + 1];
31370 ready[*pn_ready-1] = tmp;
31372 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31373 INSN_PRIORITY (tmp)++;
31375 first_store_pos = -1;
31377 break;
31380 pos--;
31383 if (first_store_pos >= 0)
31385 /* An adjacent store wasn't found, but a non-adjacent store was,
31386 so move the non-adjacent store to the front of the ready
31387 list, and adjust its priority so that it is more likely to
31388 stay there. */
31389 tmp = ready[first_store_pos];
31390 for (i=first_store_pos; i<*pn_ready-1; i++)
31391 ready[i] = ready[i + 1];
31392 ready[*pn_ready-1] = tmp;
31393 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31394 INSN_PRIORITY (tmp)++;
31397 else if (load_store_pendulum == 2)
31399 /* Two loads have been issued in this cycle. Increase the priority
31400 of the first store in the ready list to favor it for issuing in
31401 the next cycle. */
31402 pos = *pn_ready-1;
31404 while (pos >= 0)
31406 if (is_store_insn (ready[pos], &str_mem)
31407 && !sel_sched_p ()
31408 && INSN_PRIORITY_KNOWN (ready[pos]))
31410 INSN_PRIORITY (ready[pos])++;
31412 /* Adjust the pendulum to account for the fact that a store
31413 was found and increased in priority. This is to prevent
31414 increasing the priority of multiple stores */
31415 load_store_pendulum++;
31417 break;
31419 pos--;
31424 /* Do Power9 dependent reordering if necessary. */
31425 if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
31426 && recog_memoized (last_scheduled_insn) >= 0)
31427 return power9_sched_reorder2 (ready, *pn_ready - 1);
31429 return cached_can_issue_more;
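/* Worked pendulum trace (hypothetical issue order): the pendulum resets
   to 0 at the start of each cycle in rs6000_sched_reorder.  Issuing
   store, store within one cycle moves it 0 -> -1 -> -2; at -1 the ready
   list is scanned for a second (preferably adjacent) store, and at -2
   the first load on the ready list gets a priority bump and the
   pendulum moves on to -3 so that only one load is boosted.  The
   mirror image holds for loads and the states +1 and +2.  */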
31432 /* Return whether the presence of INSN causes a dispatch group termination
31433 of group WHICH_GROUP.
31435 If WHICH_GROUP == current_group, this function will return true if INSN
31436 causes the termination of the current group (i.e., the dispatch group to
31437 which INSN belongs). This means that INSN will be the last insn in the
31438 group it belongs to.
31440 If WHICH_GROUP == previous_group, this function will return true if INSN
31441 causes the termination of the previous group (i.e., the dispatch group that
31442 precedes the group to which INSN belongs). This means that INSN will be
31443 the first insn in the group it belongs to. */
31445 static bool
31446 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31448 bool first, last;
31450 if (! insn)
31451 return false;
31453 first = insn_must_be_first_in_group (insn);
31454 last = insn_must_be_last_in_group (insn);
31456 if (first && last)
31457 return true;
31459 if (which_group == current_group)
31460 return last;
31461 else if (which_group == previous_group)
31462 return first;
31464 return false;
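/* The decision above as a truth table, where first/last are the results
   of insn_must_be_first_in_group and insn_must_be_last_in_group:

     first  last   current_group  previous_group
       1     1         true           true
       0     1         true           false
       1     0         false          true
       0     0         false          false  */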
31468 static bool
31469 insn_must_be_first_in_group (rtx_insn *insn)
31471 enum attr_type type;
31473 if (!insn
31474 || NOTE_P (insn)
31475 || DEBUG_INSN_P (insn)
31476 || GET_CODE (PATTERN (insn)) == USE
31477 || GET_CODE (PATTERN (insn)) == CLOBBER)
31478 return false;
31480 switch (rs6000_cpu)
31482 case PROCESSOR_POWER5:
31483 if (is_cracked_insn (insn))
31484 return true;
31485 /* FALLTHRU */
31486 case PROCESSOR_POWER4:
31487 if (is_microcoded_insn (insn))
31488 return true;
31490 if (!rs6000_sched_groups)
31491 return false;
31493 type = get_attr_type (insn);
31495 switch (type)
31497 case TYPE_MFCR:
31498 case TYPE_MFCRF:
31499 case TYPE_MTCR:
31500 case TYPE_DELAYED_CR:
31501 case TYPE_CR_LOGICAL:
31502 case TYPE_MTJMPR:
31503 case TYPE_MFJMPR:
31504 case TYPE_DIV:
31505 case TYPE_LOAD_L:
31506 case TYPE_STORE_C:
31507 case TYPE_ISYNC:
31508 case TYPE_SYNC:
31509 return true;
31510 default:
31511 break;
31513 break;
31514 case PROCESSOR_POWER6:
31515 type = get_attr_type (insn);
31517 switch (type)
31519 case TYPE_EXTS:
31520 case TYPE_CNTLZ:
31521 case TYPE_TRAP:
31522 case TYPE_MUL:
31523 case TYPE_INSERT:
31524 case TYPE_FPCOMPARE:
31525 case TYPE_MFCR:
31526 case TYPE_MTCR:
31527 case TYPE_MFJMPR:
31528 case TYPE_MTJMPR:
31529 case TYPE_ISYNC:
31530 case TYPE_SYNC:
31531 case TYPE_LOAD_L:
31532 case TYPE_STORE_C:
31533 return true;
31534 case TYPE_SHIFT:
31535 if (get_attr_dot (insn) == DOT_NO
31536 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31537 return true;
31538 else
31539 break;
31540 case TYPE_DIV:
31541 if (get_attr_size (insn) == SIZE_32)
31542 return true;
31543 else
31544 break;
31545 case TYPE_LOAD:
31546 case TYPE_STORE:
31547 case TYPE_FPLOAD:
31548 case TYPE_FPSTORE:
31549 if (get_attr_update (insn) == UPDATE_YES)
31550 return true;
31551 else
31552 break;
31553 default:
31554 break;
31556 break;
31557 case PROCESSOR_POWER7:
31558 type = get_attr_type (insn);
31560 switch (type)
31562 case TYPE_CR_LOGICAL:
31563 case TYPE_MFCR:
31564 case TYPE_MFCRF:
31565 case TYPE_MTCR:
31566 case TYPE_DIV:
31567 case TYPE_ISYNC:
31568 case TYPE_LOAD_L:
31569 case TYPE_STORE_C:
31570 case TYPE_MFJMPR:
31571 case TYPE_MTJMPR:
31572 return true;
31573 case TYPE_MUL:
31574 case TYPE_SHIFT:
31575 case TYPE_EXTS:
31576 if (get_attr_dot (insn) == DOT_YES)
31577 return true;
31578 else
31579 break;
31580 case TYPE_LOAD:
31581 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31582 || get_attr_update (insn) == UPDATE_YES)
31583 return true;
31584 else
31585 break;
31586 case TYPE_STORE:
31587 case TYPE_FPLOAD:
31588 case TYPE_FPSTORE:
31589 if (get_attr_update (insn) == UPDATE_YES)
31590 return true;
31591 else
31592 break;
31593 default:
31594 break;
31596 break;
31597 case PROCESSOR_POWER8:
31598 type = get_attr_type (insn);
31600 switch (type)
31602 case TYPE_CR_LOGICAL:
31603 case TYPE_DELAYED_CR:
31604 case TYPE_MFCR:
31605 case TYPE_MFCRF:
31606 case TYPE_MTCR:
31607 case TYPE_SYNC:
31608 case TYPE_ISYNC:
31609 case TYPE_LOAD_L:
31610 case TYPE_STORE_C:
31611 case TYPE_VECSTORE:
31612 case TYPE_MFJMPR:
31613 case TYPE_MTJMPR:
31614 return true;
31615 case TYPE_SHIFT:
31616 case TYPE_EXTS:
31617 case TYPE_MUL:
31618 if (get_attr_dot (insn) == DOT_YES)
31619 return true;
31620 else
31621 break;
31622 case TYPE_LOAD:
31623 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31624 || get_attr_update (insn) == UPDATE_YES)
31625 return true;
31626 else
31627 break;
31628 case TYPE_STORE:
31629 if (get_attr_update (insn) == UPDATE_YES
31630 && get_attr_indexed (insn) == INDEXED_YES)
31631 return true;
31632 else
31633 break;
31634 default:
31635 break;
31637 break;
31638 default:
31639 break;
31642 return false;
31645 static bool
31646 insn_must_be_last_in_group (rtx_insn *insn)
31648 enum attr_type type;
31650 if (!insn
31651 || NOTE_P (insn)
31652 || DEBUG_INSN_P (insn)
31653 || GET_CODE (PATTERN (insn)) == USE
31654 || GET_CODE (PATTERN (insn)) == CLOBBER)
31655 return false;
31657 switch (rs6000_cpu) {
31658 case PROCESSOR_POWER4:
31659 case PROCESSOR_POWER5:
31660 if (is_microcoded_insn (insn))
31661 return true;
31663 if (is_branch_slot_insn (insn))
31664 return true;
31666 break;
31667 case PROCESSOR_POWER6:
31668 type = get_attr_type (insn);
31670 switch (type)
31672 case TYPE_EXTS:
31673 case TYPE_CNTLZ:
31674 case TYPE_TRAP:
31675 case TYPE_MUL:
31676 case TYPE_FPCOMPARE:
31677 case TYPE_MFCR:
31678 case TYPE_MTCR:
31679 case TYPE_MFJMPR:
31680 case TYPE_MTJMPR:
31681 case TYPE_ISYNC:
31682 case TYPE_SYNC:
31683 case TYPE_LOAD_L:
31684 case TYPE_STORE_C:
31685 return true;
31686 case TYPE_SHIFT:
31687 if (get_attr_dot (insn) == DOT_NO
31688 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31689 return true;
31690 else
31691 break;
31692 case TYPE_DIV:
31693 if (get_attr_size (insn) == SIZE_32)
31694 return true;
31695 else
31696 break;
31697 default:
31698 break;
31700 break;
31701 case PROCESSOR_POWER7:
31702 type = get_attr_type (insn);
31704 switch (type)
31706 case TYPE_ISYNC:
31707 case TYPE_SYNC:
31708 case TYPE_LOAD_L:
31709 case TYPE_STORE_C:
31710 return true;
31711 case TYPE_LOAD:
31712 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31713 && get_attr_update (insn) == UPDATE_YES)
31714 return true;
31715 else
31716 break;
31717 case TYPE_STORE:
31718 if (get_attr_update (insn) == UPDATE_YES
31719 && get_attr_indexed (insn) == INDEXED_YES)
31720 return true;
31721 else
31722 break;
31723 default:
31724 break;
31726 break;
31727 case PROCESSOR_POWER8:
31728 type = get_attr_type (insn);
31730 switch (type)
31732 case TYPE_MFCR:
31733 case TYPE_MTCR:
31734 case TYPE_ISYNC:
31735 case TYPE_SYNC:
31736 case TYPE_LOAD_L:
31737 case TYPE_STORE_C:
31738 return true;
31739 case TYPE_LOAD:
31740 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31741 && get_attr_update (insn) == UPDATE_YES)
31742 return true;
31743 else
31744 break;
31745 case TYPE_STORE:
31746 if (get_attr_update (insn) == UPDATE_YES
31747 && get_attr_indexed (insn) == INDEXED_YES)
31748 return true;
31749 else
31750 break;
31751 default:
31752 break;
31754 break;
31755 default:
31756 break;
31759 return false;
31762 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31763 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31765 static bool
31766 is_costly_group (rtx *group_insns, rtx next_insn)
31768 int i;
31769 int issue_rate = rs6000_issue_rate ();
31771 for (i = 0; i < issue_rate; i++)
31773 sd_iterator_def sd_it;
31774 dep_t dep;
31775 rtx insn = group_insns[i];
31777 if (!insn)
31778 continue;
31780 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31782 rtx next = DEP_CON (dep);
31784 if (next == next_insn
31785 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31786 return true;
31790 return false;
31793 /* Utility of the function redefine_groups.
31794 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31795 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31796 to keep it "far" (in a separate group) from GROUP_INSNS, following
31797 one of the following schemes, depending on the value of the flag
31798 -minsert-sched-nops=X:
31799 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31800 in order to force NEXT_INSN into a separate group.
31801 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31802 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31803 insertion (has a group just ended, how many vacant issue slots remain in the
31804 last group, and how many dispatch groups were encountered so far). */
31806 static int
31807 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31808 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31809 int *group_count)
31811 rtx nop;
31812 bool force;
31813 int issue_rate = rs6000_issue_rate ();
31814 bool end = *group_end;
31815 int i;
31817 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31818 return can_issue_more;
31820 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31821 return can_issue_more;
31823 force = is_costly_group (group_insns, next_insn);
31824 if (!force)
31825 return can_issue_more;
31827 if (sched_verbose > 6)
31828 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
31829 *group_count ,can_issue_more);
31831 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31833 if (*group_end)
31834 can_issue_more = 0;
31836 /* Since only a branch can be issued in the last issue_slot, it is
31837 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31838 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31839 in this case the last nop will start a new group and the branch
31840 will be forced to the new group. */
31841 if (can_issue_more && !is_branch_slot_insn (next_insn))
31842 can_issue_more--;
31844 /* Do we have a special group ending nop? */
31845 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
31846 || rs6000_cpu_attr == CPU_POWER8)
31848 nop = gen_group_ending_nop ();
31849 emit_insn_before (nop, next_insn);
31850 can_issue_more = 0;
31852 else
31853 while (can_issue_more > 0)
31855 nop = gen_nop ();
31856 emit_insn_before (nop, next_insn);
31857 can_issue_more--;
31860 *group_end = true;
31861 return 0;
31864 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31866 int n_nops = rs6000_sched_insert_nops;
31868 /* Nops can't be issued from the branch slot, so the effective
31869 issue_rate for nops is 'issue_rate - 1'. */
31870 if (can_issue_more == 0)
31871 can_issue_more = issue_rate;
31872 can_issue_more--;
31873 if (can_issue_more == 0)
31875 can_issue_more = issue_rate - 1;
31876 (*group_count)++;
31877 end = true;
31878 for (i = 0; i < issue_rate; i++)
31880 group_insns[i] = 0;
31884 while (n_nops > 0)
31886 nop = gen_nop ();
31887 emit_insn_before (nop, next_insn);
31888 if (can_issue_more == issue_rate - 1) /* new group begins */
31889 end = false;
31890 can_issue_more--;
31891 if (can_issue_more == 0)
31893 can_issue_more = issue_rate - 1;
31894 (*group_count)++;
31895 end = true;
31896 for (i = 0; i < issue_rate; i++)
31898 group_insns[i] = 0;
31901 n_nops--;
31904 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31905 can_issue_more++;
31907 /* Is next_insn going to start a new group? */
31908 *group_end
31909 = (end
31910 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31911 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31912 || (can_issue_more < issue_rate &&
31913 insn_terminates_group_p (next_insn, previous_group)));
31914 if (*group_end && end)
31915 (*group_count)--;
31917 if (sched_verbose > 6)
31918 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31919 *group_count, can_issue_more);
31920 return can_issue_more;
31923 return can_issue_more;
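/* Worked example of the exact-regroup scheme above (hypothetical
   numbers, on a CPU without a group-ending nop): with issue_rate = 5,
   can_issue_more = 3 and a non-branch NEXT_INSN, one slot is reserved
   for the branch-only slot (can_issue_more--), two plain nops are
   emitted, and NEXT_INSN then starts a fresh group.  */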
31926 /* This function tries to synch the dispatch groups that the compiler "sees"
31927 with the dispatch groups that the processor dispatcher is expected to
31928 form in practice. It tries to achieve this synchronization by forcing the
31929 estimated processor grouping on the compiler (as opposed to the function
31930 'pad_groups' which tries to force the scheduler's grouping on the processor).
31932 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31933 examines the (estimated) dispatch groups that will be formed by the processor
31934 dispatcher. It marks these group boundaries to reflect the estimated
31935 processor grouping, overriding the grouping that the scheduler had marked.
31936 Depending on the value of the flag '-minsert-sched-nops' this function can
31937 force certain insns into separate groups or force a certain distance between
31938 them by inserting nops, for example, if there exists a "costly dependence"
31939 between the insns.
31941 The function estimates the group boundaries that the processor will form as
31942 follows: It keeps track of how many vacant issue slots are available after
31943 each insn. A subsequent insn will start a new group if one of the following
31944 4 cases applies:
31945 - no more vacant issue slots remain in the current dispatch group.
31946 - only the last issue slot, which is the branch slot, is vacant, but the next
31947 insn is not a branch.
31948 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
31949 which means that a cracked insn (which occupies two issue slots) can't be
31950 issued in this group.
31951 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
31952 start a new group. */
31954 static int
31955 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31956 rtx_insn *tail)
31958 rtx_insn *insn, *next_insn;
31959 int issue_rate;
31960 int can_issue_more;
31961 int slot, i;
31962 bool group_end;
31963 int group_count = 0;
31964 rtx *group_insns;
31966 /* Initialize. */
31967 issue_rate = rs6000_issue_rate ();
31968 group_insns = XALLOCAVEC (rtx, issue_rate);
31969 for (i = 0; i < issue_rate; i++)
31971 group_insns[i] = 0;
31973 can_issue_more = issue_rate;
31974 slot = 0;
31975 insn = get_next_active_insn (prev_head_insn, tail);
31976 group_end = false;
31978 while (insn != NULL_RTX)
31980 slot = (issue_rate - can_issue_more);
31981 group_insns[slot] = insn;
31982 can_issue_more =
31983 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31984 if (insn_terminates_group_p (insn, current_group))
31985 can_issue_more = 0;
31987 next_insn = get_next_active_insn (insn, tail);
31988 if (next_insn == NULL_RTX)
31989 return group_count + 1;
31991 /* Is next_insn going to start a new group? */
31992 group_end
31993 = (can_issue_more == 0
31994 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31995 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31996 || (can_issue_more < issue_rate &&
31997 insn_terminates_group_p (next_insn, previous_group)));
31999 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32000 next_insn, &group_end, can_issue_more,
32001 &group_count);
32003 if (group_end)
32005 group_count++;
32006 can_issue_more = 0;
32007 for (i = 0; i < issue_rate; i++)
32009 group_insns[i] = 0;
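/* The scheduler marks the first insn of each dispatch group by giving
   it TImode; VOIDmode clears the mark.  The update below rewrites those
   marks to match the recomputed group boundaries.  */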
32013 if (GET_MODE (next_insn) == TImode && can_issue_more)
32014 PUT_MODE (next_insn, VOIDmode);
32015 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32016 PUT_MODE (next_insn, TImode);
32018 insn = next_insn;
32019 if (can_issue_more == 0)
32020 can_issue_more = issue_rate;
32021 } /* while */
32023 return group_count;
32026 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32027 dispatch group boundaries that the scheduler had marked. Pad with nops
32028 any dispatch groups which have vacant issue slots, in order to force the
32029 scheduler's grouping on the processor dispatcher. The function
32030 returns the number of dispatch groups found. */
32032 static int
32033 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32034 rtx_insn *tail)
32036 rtx_insn *insn, *next_insn;
32037 rtx nop;
32038 int issue_rate;
32039 int can_issue_more;
32040 int group_end;
32041 int group_count = 0;
32043 /* Initialize issue_rate. */
32044 issue_rate = rs6000_issue_rate ();
32045 can_issue_more = issue_rate;
32047 insn = get_next_active_insn (prev_head_insn, tail);
32048 next_insn = get_next_active_insn (insn, tail);
32050 while (insn != NULL_RTX)
32052 can_issue_more =
32053 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32055 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32057 if (next_insn == NULL_RTX)
32058 break;
32060 if (group_end)
32062 /* If the scheduler had marked group termination at this location
32063 (between insn and next_insn), and neither insn nor next_insn will
32064 force group termination, pad the group with nops to force group
32065 termination. */
32066 if (can_issue_more
32067 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32068 && !insn_terminates_group_p (insn, current_group)
32069 && !insn_terminates_group_p (next_insn, previous_group))
32071 if (!is_branch_slot_insn (next_insn))
32072 can_issue_more--;
32074 while (can_issue_more)
32076 nop = gen_nop ();
32077 emit_insn_before (nop, next_insn);
32078 can_issue_more--;
32082 can_issue_more = issue_rate;
32083 group_count++;
32086 insn = next_insn;
32087 next_insn = get_next_active_insn (insn, tail);
32090 return group_count;
32093 /* We're beginning a new block. Initialize data structures as necessary. */
32095 static void
32096 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32097 int sched_verbose ATTRIBUTE_UNUSED,
32098 int max_ready ATTRIBUTE_UNUSED)
32100 last_scheduled_insn = NULL;
32101 load_store_pendulum = 0;
32102 divide_cnt = 0;
32103 vec_pairing = 0;
32106 /* The following function is called at the end of scheduling a BB.
32107 After reload, it inserts nops to enforce insn group bundling. */
32109 static void
32110 rs6000_sched_finish (FILE *dump, int sched_verbose)
32112 int n_groups;
32114 if (sched_verbose)
32115 fprintf (dump, "=== Finishing schedule.\n");
32117 if (reload_completed && rs6000_sched_groups)
32119 /* Do not run sched_finish hook when selective scheduling enabled. */
32120 if (sel_sched_p ())
32121 return;
32123 if (rs6000_sched_insert_nops == sched_finish_none)
32124 return;
32126 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32127 n_groups = pad_groups (dump, sched_verbose,
32128 current_sched_info->prev_head,
32129 current_sched_info->next_tail);
32130 else
32131 n_groups = redefine_groups (dump, sched_verbose,
32132 current_sched_info->prev_head,
32133 current_sched_info->next_tail);
32135 if (sched_verbose >= 6)
32137 fprintf (dump, "ngroups = %d\n", n_groups);
32138 print_rtl (dump, current_sched_info->prev_head);
32139 fprintf (dump, "Done finish_sched\n");
32144 struct rs6000_sched_context
32146 short cached_can_issue_more;
32147 rtx_insn *last_scheduled_insn;
32148 int load_store_pendulum;
32149 int divide_cnt;
32150 int vec_pairing;
32153 typedef struct rs6000_sched_context rs6000_sched_context_def;
32154 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32156 /* Allocate store for new scheduling context. */
32157 static void *
32158 rs6000_alloc_sched_context (void)
32160 return xmalloc (sizeof (rs6000_sched_context_def));
32163 /* If CLEAN_P is true, initialize _SC with clean data; otherwise,
32164 initialize it from the global context. */
32165 static void
32166 rs6000_init_sched_context (void *_sc, bool clean_p)
32168 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32170 if (clean_p)
32172 sc->cached_can_issue_more = 0;
32173 sc->last_scheduled_insn = NULL;
32174 sc->load_store_pendulum = 0;
32175 sc->divide_cnt = 0;
32176 sc->vec_pairing = 0;
32178 else
32180 sc->cached_can_issue_more = cached_can_issue_more;
32181 sc->last_scheduled_insn = last_scheduled_insn;
32182 sc->load_store_pendulum = load_store_pendulum;
32183 sc->divide_cnt = divide_cnt;
32184 sc->vec_pairing = vec_pairing;
32188 /* Sets the global scheduling context to the one pointed to by _SC. */
32189 static void
32190 rs6000_set_sched_context (void *_sc)
32192 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32194 gcc_assert (sc != NULL);
32196 cached_can_issue_more = sc->cached_can_issue_more;
32197 last_scheduled_insn = sc->last_scheduled_insn;
32198 load_store_pendulum = sc->load_store_pendulum;
32199 divide_cnt = sc->divide_cnt;
32200 vec_pairing = sc->vec_pairing;
32203 /* Free _SC. */
32204 static void
32205 rs6000_free_sched_context (void *_sc)
32207 gcc_assert (_sc != NULL);
32209 free (_sc);
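/* The four functions above (alloc/init/set/free) form the selective
   scheduler's context save/restore protocol: allocate a context,
   initialize it from the current globals (CLEAN_P == false), later set
   it back to make those globals current again, then free it.  */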
32212 static bool
32213 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32215 switch (get_attr_type (insn))
32217 case TYPE_DIV:
32218 case TYPE_SDIV:
32219 case TYPE_DDIV:
32220 case TYPE_VECDIV:
32221 case TYPE_SSQRT:
32222 case TYPE_DSQRT:
32223 return false;
32225 default:
32226 return true;
32230 /* Length in bytes of the trampoline for entering a nested function. */
32233 rs6000_trampoline_size (void)
32235 int ret = 0;
32237 switch (DEFAULT_ABI)
32239 default:
32240 gcc_unreachable ();
32242 case ABI_AIX:
32243 ret = (TARGET_32BIT) ? 12 : 24;
32244 break;
32246 case ABI_ELFv2:
32247 gcc_assert (!TARGET_32BIT);
32248 ret = 32;
32249 break;
32251 case ABI_DARWIN:
32252 case ABI_V4:
32253 ret = (TARGET_32BIT) ? 40 : 48;
32254 break;
32257 return ret;
32260 /* Emit RTL insns to initialize the variable parts of a trampoline.
32261 FNADDR is an RTX for the address of the function's pure code.
32262 CXT is an RTX for the static chain value for the function. */
32264 static void
32265 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32267 int regsize = (TARGET_32BIT) ? 4 : 8;
32268 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32269 rtx ctx_reg = force_reg (Pmode, cxt);
32270 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32272 switch (DEFAULT_ABI)
32274 default:
32275 gcc_unreachable ();
32277 /* Under AIX, just build the 3-word function descriptor. */
32278 case ABI_AIX:
32280 rtx fnmem, fn_reg, toc_reg;
32282 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32283 error ("you cannot take the address of a nested function if you use "
32284 "the %qs option", "-mno-pointers-to-nested-functions");
32286 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32287 fn_reg = gen_reg_rtx (Pmode);
32288 toc_reg = gen_reg_rtx (Pmode);
32290 /* Macro to shorten the code expansions below. */
32291 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32293 m_tramp = replace_equiv_address (m_tramp, addr);
32295 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32296 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32297 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32298 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32299 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32301 # undef MEM_PLUS
32303 break;
32305 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32306 case ABI_ELFv2:
32307 case ABI_DARWIN:
32308 case ABI_V4:
32309 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32310 LCT_NORMAL, VOIDmode,
32311 addr, Pmode,
32312 GEN_INT (rs6000_trampoline_size ()), SImode,
32313 fnaddr, Pmode,
32314 ctx_reg, Pmode);
32315 break;
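/* Resulting AIX trampoline layout: a 3-word function descriptor, with
   regsize (4 or 8) bytes per word:

     offset 0           code address   (copied from FNADDR's descriptor)
     offset regsize     TOC pointer    (likewise)
     offset 2*regsize   static chain   (CXT)  */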
32320 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32321 identifier as an argument, so the front end shouldn't look it up. */
32323 static bool
32324 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32326 return is_attribute_p ("altivec", attr_id);
32329 /* Handle the "altivec" attribute. The attribute may have
32330 arguments as follows:
32332 __attribute__((altivec(vector__)))
32333 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32334 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32336 and may appear more than once (e.g., 'vector bool char') in a
32337 given declaration. */
32339 static tree
32340 rs6000_handle_altivec_attribute (tree *node,
32341 tree name ATTRIBUTE_UNUSED,
32342 tree args,
32343 int flags ATTRIBUTE_UNUSED,
32344 bool *no_add_attrs)
32346 tree type = *node, result = NULL_TREE;
32347 machine_mode mode;
32348 int unsigned_p;
32349 char altivec_type
32350 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32351 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32352 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32353 : '?');
32355 while (POINTER_TYPE_P (type)
32356 || TREE_CODE (type) == FUNCTION_TYPE
32357 || TREE_CODE (type) == METHOD_TYPE
32358 || TREE_CODE (type) == ARRAY_TYPE)
32359 type = TREE_TYPE (type);
32361 mode = TYPE_MODE (type);
32363 /* Check for invalid AltiVec type qualifiers. */
32364 if (type == long_double_type_node)
32365 error ("use of %<long double%> in AltiVec types is invalid");
32366 else if (type == boolean_type_node)
32367 error ("use of boolean types in AltiVec types is invalid");
32368 else if (TREE_CODE (type) == COMPLEX_TYPE)
32369 error ("use of %<complex%> in AltiVec types is invalid");
32370 else if (DECIMAL_FLOAT_MODE_P (mode))
32371 error ("use of decimal floating point types in AltiVec types is invalid");
32372 else if (!TARGET_VSX)
32374 if (type == long_unsigned_type_node || type == long_integer_type_node)
32376 if (TARGET_64BIT)
32377 error ("use of %<long%> in AltiVec types is invalid for "
32378 "64-bit code without %qs", "-mvsx");
32379 else if (rs6000_warn_altivec_long)
32380 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32381 "use %<int%>");
32383 else if (type == long_long_unsigned_type_node
32384 || type == long_long_integer_type_node)
32385 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32386 "-mvsx");
32387 else if (type == double_type_node)
32388 error ("use of %<double%> in AltiVec types is invalid without %qs",
32389 "-mvsx");
32392 switch (altivec_type)
32394 case 'v':
32395 unsigned_p = TYPE_UNSIGNED (type);
32396 switch (mode)
32398 case E_TImode:
32399 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32400 break;
32401 case E_DImode:
32402 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32403 break;
32404 case E_SImode:
32405 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32406 break;
32407 case E_HImode:
32408 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32409 break;
32410 case E_QImode:
32411 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32412 break;
32413 case E_SFmode: result = V4SF_type_node; break;
32414 case E_DFmode: result = V2DF_type_node; break;
32415 /* If the user says 'vector int bool', we may be handed the 'bool'
32416 attribute _before_ the 'vector' attribute, and so select the
32417 proper type in the 'b' case below. */
32418 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32419 case E_V2DImode: case E_V2DFmode:
32420 result = type;
32421 default: break;
32423 break;
32424 case 'b':
32425 switch (mode)
32427 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32428 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32429 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32430 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32431 default: break;
32433 break;
32434 case 'p':
32435 switch (mode)
32437 case E_V8HImode: result = pixel_V8HI_type_node;
32438 default: break;
32440 default: break;
32443 /* Propagate qualifiers attached to the element type
32444 onto the vector type. */
32445 if (result && result != type && TYPE_QUALS (type))
32446 result = build_qualified_type (result, TYPE_QUALS (type));
32448 *no_add_attrs = true; /* No need to hang on to the attribute. */
32450 if (result)
32451 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32453 return NULL_TREE;
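/* Usage sketch (hypothetical source): with the AltiVec keywords
   enabled, a declaration such as

     vector unsigned int v;

   reaches this handler roughly as

     __attribute__((altivec(vector__))) unsigned int v;

   and the 'v' case above (SImode element, unsigned) rewrites the type
   to unsigned_V4SI_type_node.  */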
32456 /* AltiVec defines four built-in scalar types that serve as vector
32457 elements; we must teach the compiler how to mangle them. */
32459 static const char *
32460 rs6000_mangle_type (const_tree type)
32462 type = TYPE_MAIN_VARIANT (type);
32464 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32465 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32466 return NULL;
32468 if (type == bool_char_type_node) return "U6__boolc";
32469 if (type == bool_short_type_node) return "U6__bools";
32470 if (type == pixel_type_node) return "u7__pixel";
32471 if (type == bool_int_type_node) return "U6__booli";
32472 if (type == bool_long_type_node) return "U6__booll";
32474 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32475 "g" for IBM extended double, no matter whether it is long double (using
32476 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32477 if (TARGET_FLOAT128_TYPE)
32479 if (type == ieee128_float_type_node)
32480 return "U10__float128";
32482 if (type == ibm128_float_type_node)
32483 return "g";
32485 if (type == long_double_type_node && TARGET_LONG_DOUBLE_128)
32486 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
32489 /* Mangle IBM extended float long double as `g' (__float128) on
32490 powerpc*-linux where long-double-64 previously was the default. */
32491 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
32492 && TARGET_ELF
32493 && TARGET_LONG_DOUBLE_128
32494 && !TARGET_IEEEQUAD)
32495 return "g";
32497 /* For all other types, use normal C++ mangling. */
32498 return NULL;
32501 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32502 struct attribute_spec.handler. */
32504 static tree
32505 rs6000_handle_longcall_attribute (tree *node, tree name,
32506 tree args ATTRIBUTE_UNUSED,
32507 int flags ATTRIBUTE_UNUSED,
32508 bool *no_add_attrs)
32510 if (TREE_CODE (*node) != FUNCTION_TYPE
32511 && TREE_CODE (*node) != FIELD_DECL
32512 && TREE_CODE (*node) != TYPE_DECL)
32514 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32515 name);
32516 *no_add_attrs = true;
32519 return NULL_TREE;
32522 /* Set longcall attributes on all functions declared when
32523 rs6000_default_long_calls is true. */
32524 static void
32525 rs6000_set_default_type_attributes (tree type)
32527 if (rs6000_default_long_calls
32528 && (TREE_CODE (type) == FUNCTION_TYPE
32529 || TREE_CODE (type) == METHOD_TYPE))
32530 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32531 NULL_TREE,
32532 TYPE_ATTRIBUTES (type));
32534 #if TARGET_MACHO
32535 darwin_set_default_type_attributes (type);
32536 #endif
32539 /* Return a reference suitable for calling a function with the
32540 longcall attribute. */
32543 rs6000_longcall_ref (rtx call_ref)
32545 const char *call_name;
32546 tree node;
32548 if (GET_CODE (call_ref) != SYMBOL_REF)
32549 return call_ref;
32551 /* System V adds '.' to the internal name, so skip any leading dots. */
32552 call_name = XSTR (call_ref, 0);
32553 if (*call_name == '.')
32555 while (*call_name == '.')
32556 call_name++;
32558 node = get_identifier (call_name);
32559 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32562 return force_reg (Pmode, call_ref);
32565 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32566 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32567 #endif
32569 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32570 struct attribute_spec.handler. */
32571 static tree
32572 rs6000_handle_struct_attribute (tree *node, tree name,
32573 tree args ATTRIBUTE_UNUSED,
32574 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32576 tree *type = NULL;
32577 if (DECL_P (*node))
32579 if (TREE_CODE (*node) == TYPE_DECL)
32580 type = &TREE_TYPE (*node);
32582 else
32583 type = node;
32585 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32586 || TREE_CODE (*type) == UNION_TYPE)))
32588 warning (OPT_Wattributes, "%qE attribute ignored", name);
32589 *no_add_attrs = true;
32592 else if ((is_attribute_p ("ms_struct", name)
32593 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32594 || ((is_attribute_p ("gcc_struct", name)
32595 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32597 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32598 name);
32599 *no_add_attrs = true;
32602 return NULL_TREE;
32605 static bool
32606 rs6000_ms_bitfield_layout_p (const_tree record_type)
32608 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
32609 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32610 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32613 #ifdef USING_ELFOS_H
32615 /* A get_unnamed_section callback, used for switching to toc_section. */
32617 static void
32618 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32620 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32621 && TARGET_MINIMAL_TOC)
32623 if (!toc_initialized)
32625 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32626 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32627 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32628 fprintf (asm_out_file, "\t.tc ");
32629 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32630 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32631 fprintf (asm_out_file, "\n");
32633 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32634 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32635 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32636 fprintf (asm_out_file, " = .+32768\n");
32637 toc_initialized = 1;
32639 else
32640 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32642 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32644 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32645 if (!toc_initialized)
32647 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32648 toc_initialized = 1;
32651 else
32653 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32654 if (!toc_initialized)
32656 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32657 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32658 fprintf (asm_out_file, " = .+32768\n");
32659 toc_initialized = 1;
32664 /* Implement TARGET_ASM_INIT_SECTIONS. */
32666 static void
32667 rs6000_elf_asm_init_sections (void)
32669 toc_section
32670 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32672 sdata2_section
32673 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32674 SDATA2_SECTION_ASM_OP);
32677 /* Implement TARGET_SELECT_RTX_SECTION. */
32679 static section *
32680 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32681 unsigned HOST_WIDE_INT align)
32683 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32684 return toc_section;
32685 else
32686 return default_elf_select_rtx_section (mode, x, align);
32689 /* For a SYMBOL_REF, set generic flags and then perform some
32690 target-specific processing.
32692 When the AIX ABI is requested on a non-AIX system, replace the
32693 function name with the real name (with a leading .) rather than the
32694 function descriptor name. This saves a lot of overriding code to
32695 read the prefixes. */
32697 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32698 static void
32699 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32701 default_encode_section_info (decl, rtl, first);
32703 if (first
32704 && TREE_CODE (decl) == FUNCTION_DECL
32705 && !TARGET_AIX
32706 && DEFAULT_ABI == ABI_AIX)
32708 rtx sym_ref = XEXP (rtl, 0);
32709 size_t len = strlen (XSTR (sym_ref, 0));
32710 char *str = XALLOCAVEC (char, len + 2);
32711 str[0] = '.';
32712 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32713 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32717 static inline bool
32718 compare_section_name (const char *section, const char *templ)
32720 int len;
32722 len = strlen (templ);
32723 return (strncmp (section, templ, len) == 0
32724 && (section[len] == 0 || section[len] == '.'));
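/* Worked examples of the prefix test above:
   compare_section_name (".sdata.foo", ".sdata") is true (next char '.'),
   compare_section_name (".sdata", ".sdata") is true (next char NUL),
   but compare_section_name (".sdata2", ".sdata") is false, which is why
   ".sdata2" is listed separately in the caller below.  */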
32727 bool
32728 rs6000_elf_in_small_data_p (const_tree decl)
32730 if (rs6000_sdata == SDATA_NONE)
32731 return false;
32733 /* We want to merge strings, so we never consider them small data. */
32734 if (TREE_CODE (decl) == STRING_CST)
32735 return false;
32737 /* Functions are never in the small data area. */
32738 if (TREE_CODE (decl) == FUNCTION_DECL)
32739 return false;
32741 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32743 const char *section = DECL_SECTION_NAME (decl);
32744 if (compare_section_name (section, ".sdata")
32745 || compare_section_name (section, ".sdata2")
32746 || compare_section_name (section, ".gnu.linkonce.s")
32747 || compare_section_name (section, ".sbss")
32748 || compare_section_name (section, ".sbss2")
32749 || compare_section_name (section, ".gnu.linkonce.sb")
32750 || strcmp (section, ".PPC.EMB.sdata0") == 0
32751 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32752 return true;
32754 else
32756 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32758 if (size > 0
32759 && size <= g_switch_value
32760 /* If it's not public, and we're not going to reference it there,
32761 there's no need to put it in the small data section. */
32762 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32763 return true;
32766 return false;
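/* Worked example (assuming -G sets g_switch_value as usual): with
   -msdata=sysv and -G 8, an 8-byte global with no explicit section
   attribute lands in small data; a 9-byte one does not.  */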
32769 #endif /* USING_ELFOS_H */
32771 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32773 static bool
32774 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32776 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32779 /* Do not place thread-local symbols refs in the object blocks. */
32781 static bool
32782 rs6000_use_blocks_for_decl_p (const_tree decl)
32784 return !DECL_THREAD_LOCAL_P (decl);
32787 /* Return a REG that occurs in ADDR with coefficient 1.
32788 ADDR can be effectively incremented by incrementing REG.
32790 r0 is special and we must not select it as an address
32791 register by this routine since our caller will try to
32792 increment the returned register via an "la" instruction. */
32795 find_addr_reg (rtx addr)
32797 while (GET_CODE (addr) == PLUS)
32799 if (GET_CODE (XEXP (addr, 0)) == REG
32800 && REGNO (XEXP (addr, 0)) != 0)
32801 addr = XEXP (addr, 0);
32802 else if (GET_CODE (XEXP (addr, 1)) == REG
32803 && REGNO (XEXP (addr, 1)) != 0)
32804 addr = XEXP (addr, 1);
32805 else if (CONSTANT_P (XEXP (addr, 0)))
32806 addr = XEXP (addr, 1);
32807 else if (CONSTANT_P (XEXP (addr, 1)))
32808 addr = XEXP (addr, 0);
32809 else
32810 gcc_unreachable ();
32812 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
32813 return addr;
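/* Example: given ADDR = (plus (reg 9) (const_int 8)), the loop above
   strips the constant and returns (reg 9).  r0 must be rejected because
   in "la"/"addi" a base of r0 reads as the literal value 0, not the
   register.  */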
32816 void
32817 rs6000_fatal_bad_address (rtx op)
32819 fatal_insn ("bad address", op);
32822 #if TARGET_MACHO
32824 typedef struct branch_island_d {
32825 tree function_name;
32826 tree label_name;
32827 int line_number;
32828 } branch_island;
32831 static vec<branch_island, va_gc> *branch_islands;
32833 /* Remember to generate a branch island for far calls to the given
32834 function. */
32836 static void
32837 add_compiler_branch_island (tree label_name, tree function_name,
32838 int line_number)
32840 branch_island bi = {function_name, label_name, line_number};
32841 vec_safe_push (branch_islands, bi);
32844 /* Generate far-jump branch islands for everything recorded in
32845 branch_islands. Invoked immediately after the last instruction of
32846 the epilogue has been emitted; the branch islands must be appended
32847 to, and contiguous with, the function body. Mach-O stubs are
32848 generated in machopic_output_stub(). */
32850 static void
32851 macho_branch_islands (void)
32853 char tmp_buf[512];
32855 while (!vec_safe_is_empty (branch_islands))
32857 branch_island *bi = &branch_islands->last ();
32858 const char *label = IDENTIFIER_POINTER (bi->label_name);
32859 const char *name = IDENTIFIER_POINTER (bi->function_name);
32860 char name_buf[512];
32861 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32862 if (name[0] == '*' || name[0] == '&')
32863 strcpy (name_buf, name+1);
32864 else
32866 name_buf[0] = '_';
32867 strcpy (name_buf+1, name);
32869 strcpy (tmp_buf, "\n");
32870 strcat (tmp_buf, label);
32871 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32872 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32873 dbxout_stabd (N_SLINE, bi->line_number);
32874 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32875 if (flag_pic)
32877 if (TARGET_LINK_STACK)
32879 char name[32];
32880 get_ppc476_thunk_name (name);
32881 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32882 strcat (tmp_buf, name);
32883 strcat (tmp_buf, "\n");
32884 strcat (tmp_buf, label);
32885 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32887 else
32889 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32890 strcat (tmp_buf, label);
32891 strcat (tmp_buf, "_pic\n");
32892 strcat (tmp_buf, label);
32893 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32896 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32897 strcat (tmp_buf, name_buf);
32898 strcat (tmp_buf, " - ");
32899 strcat (tmp_buf, label);
32900 strcat (tmp_buf, "_pic)\n");
32902 strcat (tmp_buf, "\tmtlr r0\n");
32904 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32905 strcat (tmp_buf, name_buf);
32906 strcat (tmp_buf, " - ");
32907 strcat (tmp_buf, label);
32908 strcat (tmp_buf, "_pic)\n");
32910 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32912 else
32914 strcat (tmp_buf, ":\nlis r12,hi16(");
32915 strcat (tmp_buf, name_buf);
32916 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32917 strcat (tmp_buf, name_buf);
32918 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32920 output_asm_insn (tmp_buf, 0);
32921 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32922 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32923 dbxout_stabd (N_SLINE, bi->line_number);
32924 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32925 branch_islands->pop ();
32929 /* NO_PREVIOUS_DEF checks whether the function name has already been
32930 recorded in the branch_islands vector.  */
32932 static int
32933 no_previous_def (tree function_name)
32935 branch_island *bi;
32936 unsigned ix;
32938 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32939 if (function_name == bi->function_name)
32940 return 0;
32941 return 1;
32944 /* GET_PREV_LABEL gets the label name from the previous definition of
32945 the function. */
32947 static tree
32948 get_prev_label (tree function_name)
32950 branch_island *bi;
32951 unsigned ix;
32953 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32954 if (function_name == bi->function_name)
32955 return bi->label_name;
32956 return NULL_TREE;
32959 /* INSN is either a function call or a millicode call.
32962 OPERANDS[DEST_OPERAND_NUMBER] is the routine we are calling. */
32964 char *
32965 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
32966 int cookie_operand_number)
32968 static char buf[256];
32969 if (darwin_emit_branch_islands
32970 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
32971 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
32973 tree labelname;
32974 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
32976 if (no_previous_def (funname))
32978 rtx label_rtx = gen_label_rtx ();
32979 char *label_buf, temp_buf[256];
32980 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
32981 CODE_LABEL_NUMBER (label_rtx));
32982 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
32983 labelname = get_identifier (label_buf);
32984 add_compiler_branch_island (labelname, funname, insn_line (insn));
32986 else
32987 labelname = get_prev_label (funname);
32989 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
32990 instruction will reach 'foo', otherwise link as 'bl L42'".
32991 "L42" should be a 'branch island', that will do a far jump to
32992 'foo'. Branch islands are generated in
32993 macho_branch_islands(). */
32994 sprintf (buf, "jbsr %%z%d,%.246s",
32995 dest_operand_number, IDENTIFIER_POINTER (labelname));
32997 else
32998 sprintf (buf, "bl %%z%d", dest_operand_number);
32999 return buf;
33002 /* Generate PIC and indirect symbol stubs. */
33004 void
33005 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33007 unsigned int length;
33008 char *symbol_name, *lazy_ptr_name;
33009 char *local_label_0;
33010 static int label = 0;
33012 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33013 symb = (*targetm.strip_name_encoding) (symb);
33016 length = strlen (symb);
33017 symbol_name = XALLOCAVEC (char, length + 32);
33018 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33020 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33021 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33023 if (flag_pic == 2)
33024 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33025 else
33026 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33028 if (flag_pic == 2)
33030 fprintf (file, "\t.align 5\n");
33032 fprintf (file, "%s:\n", stub);
33033 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33035 label++;
33036 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33037 sprintf (local_label_0, "\"L%011d$spb\"", label);
33039 fprintf (file, "\tmflr r0\n");
33040 if (TARGET_LINK_STACK)
33042 char name[32];
33043 get_ppc476_thunk_name (name);
33044 fprintf (file, "\tbl %s\n", name);
33045 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33047 else
33049 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33050 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33052 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33053 lazy_ptr_name, local_label_0);
33054 fprintf (file, "\tmtlr r0\n");
33055 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33056 (TARGET_64BIT ? "ldu" : "lwzu"),
33057 lazy_ptr_name, local_label_0);
33058 fprintf (file, "\tmtctr r12\n");
33059 fprintf (file, "\tbctr\n");
33061 else
33063 fprintf (file, "\t.align 4\n");
33065 fprintf (file, "%s:\n", stub);
33066 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33068 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33069 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33070 (TARGET_64BIT ? "ldu" : "lwzu"),
33071 lazy_ptr_name);
33072 fprintf (file, "\tmtctr r12\n");
33073 fprintf (file, "\tbctr\n");
33076 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33077 fprintf (file, "%s:\n", lazy_ptr_name);
33078 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33079 fprintf (file, "%sdyld_stub_binding_helper\n",
33080 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33083 /* Legitimize PIC addresses. If the address is already
33084 position-independent, we return ORIG. Newly generated
33085 position-independent addresses go into a reg. This is REG if
33086 nonzero; otherwise we allocate register(s) as necessary. */
33088 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
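/* E.g., SMALL_INT accepts exactly the signed 16-bit range: X = -0x8000
   wraps to 0 and X = 0x7fff gives 0xffff, both < 0x10000, while
   X = 0x8000 gives exactly 0x10000 and is rejected.  It guards the
   plus_constant shortcut below.  */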
33091 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33092 rtx reg)
33094 rtx base, offset;
33096 if (reg == NULL && !reload_completed)
33097 reg = gen_reg_rtx (Pmode);
33099 if (GET_CODE (orig) == CONST)
33101 rtx reg_temp;
33103 if (GET_CODE (XEXP (orig, 0)) == PLUS
33104 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33105 return orig;
33107 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33109 /* Use a different reg for the intermediate value, as
33110 it will be marked UNCHANGING. */
33111 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33112 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33113 Pmode, reg_temp);
33114 offset =
33115 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33116 Pmode, reg);
33118 if (GET_CODE (offset) == CONST_INT)
33120 if (SMALL_INT (offset))
33121 return plus_constant (Pmode, base, INTVAL (offset));
33122 else if (!reload_completed)
33123 offset = force_reg (Pmode, offset);
33124 else
33126 rtx mem = force_const_mem (Pmode, orig);
33127 return machopic_legitimize_pic_address (mem, Pmode, reg);
33130 return gen_rtx_PLUS (Pmode, base, offset);
33133 /* Fall back on generic machopic code. */
33134 return machopic_legitimize_pic_address (orig, mode, reg);
33137 /* Output a .machine directive for the Darwin assembler, and call
33138 the generic start_file routine. */
33140 static void
33141 rs6000_darwin_file_start (void)
33143 static const struct
33145 const char *arg;
33146 const char *name;
33147 HOST_WIDE_INT if_set;
33148 } mapping[] = {
33149 { "ppc64", "ppc64", MASK_64BIT },
33150 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33151 { "power4", "ppc970", 0 },
33152 { "G5", "ppc970", 0 },
33153 { "7450", "ppc7450", 0 },
33154 { "7400", "ppc7400", MASK_ALTIVEC },
33155 { "G4", "ppc7400", 0 },
33156 { "750", "ppc750", 0 },
33157 { "740", "ppc750", 0 },
33158 { "G3", "ppc750", 0 },
33159 { "604e", "ppc604e", 0 },
33160 { "604", "ppc604", 0 },
33161 { "603e", "ppc603", 0 },
33162 { "603", "ppc603", 0 },
33163 { "601", "ppc601", 0 },
33164 { NULL, "ppc", 0 } };
33165 const char *cpu_id = "";
33166 size_t i;
33168 rs6000_file_start ();
33169 darwin_file_start ();
33171 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33173 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33174 cpu_id = rs6000_default_cpu;
33176 if (global_options_set.x_rs6000_cpu_index)
33177 cpu_id = processor_target_table[rs6000_cpu_index].name;
33179 /* Look through the mapping array. Pick the first name that either
33180 matches the argument, has a bit set in IF_SET that is also set
33181 in the target flags, or has a NULL name. */
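/* For example, with cpu_id "G4" the loop stops either at the "7400"
   entry (when MASK_ALTIVEC is in rs6000_isa_flags) or at the "G4"
   entry itself; both map to "ppc7400", so ".machine ppc7400" is
   emitted either way.  */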
33183 i = 0;
33184 while (mapping[i].arg != NULL
33185 && strcmp (mapping[i].arg, cpu_id) != 0
33186 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33187 i++;
33189 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33192 #endif /* TARGET_MACHO */
33194 #if TARGET_ELF
33195 static int
33196 rs6000_elf_reloc_rw_mask (void)
33198 if (flag_pic)
33199 return 3;
33200 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33201 return 2;
33202 else
33203 return 0;
33206 /* Record an element in the table of global constructors. SYMBOL is
33207 a SYMBOL_REF of the function to be called; PRIORITY is a number
33208 between 0 and MAX_INIT_PRIORITY.
33210 This differs from default_named_section_asm_out_constructor in
33211 that we have special handling for -mrelocatable. */
33213 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33214 static void
33215 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33217 const char *section = ".ctors";
33218 char buf[18];
33220 if (priority != DEFAULT_INIT_PRIORITY)
33222 sprintf (buf, ".ctors.%.5u",
33223 /* Invert the numbering so the linker puts us in the proper
33224 order; constructors are run from right to left, and the
33225 linker sorts in increasing order. */
33226 MAX_INIT_PRIORITY - priority);
33227 section = buf;
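/* E.g., with MAX_INIT_PRIORITY 65535, priority 100 names the section
   ".ctors.65435" and priority 200 names ".ctors.65335"; the linker
   sorts 65335 first, so the right-to-left walk at runtime runs the
   priority-100 constructor earlier, as intended.  */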
33230 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33231 assemble_align (POINTER_SIZE);
33233 if (DEFAULT_ABI == ABI_V4
33234 && (TARGET_RELOCATABLE || flag_pic > 1))
33236 fputs ("\t.long (", asm_out_file);
33237 output_addr_const (asm_out_file, symbol);
33238 fputs (")@fixup\n", asm_out_file);
33240 else
33241 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33244 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33245 static void
33246 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33248 const char *section = ".dtors";
33249 char buf[18];
33251 if (priority != DEFAULT_INIT_PRIORITY)
33253 sprintf (buf, ".dtors.%.5u",
33254 /* Invert the numbering so the linker puts us in the proper
33255 order; constructors are run from right to left, and the
33256 linker sorts in increasing order. */
33257 MAX_INIT_PRIORITY - priority);
33258 section = buf;
33261 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33262 assemble_align (POINTER_SIZE);
33264 if (DEFAULT_ABI == ABI_V4
33265 && (TARGET_RELOCATABLE || flag_pic > 1))
33267 fputs ("\t.long (", asm_out_file);
33268 output_addr_const (asm_out_file, symbol);
33269 fputs (")@fixup\n", asm_out_file);
33271 else
33272 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33275 void
33276 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33278 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33280 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33281 ASM_OUTPUT_LABEL (file, name);
33282 fputs (DOUBLE_INT_ASM_OP, file);
33283 rs6000_output_function_entry (file, name);
33284 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33285 if (DOT_SYMBOLS)
33287 fputs ("\t.size\t", file);
33288 assemble_name (file, name);
33289 fputs (",24\n\t.type\t.", file);
33290 assemble_name (file, name);
33291 fputs (",@function\n", file);
33292 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33294 fputs ("\t.globl\t.", file);
33295 assemble_name (file, name);
33296 putc ('\n', file);
33299 else
33300 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33301 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33302 rs6000_output_function_entry (file, name);
33303 fputs (":\n", file);
33304 return;
33307 if (DEFAULT_ABI == ABI_V4
33308 && (TARGET_RELOCATABLE || flag_pic > 1)
33309 && !TARGET_SECURE_PLT
33310 && (!constant_pool_empty_p () || crtl->profile)
33311 && uses_TOC ())
33313 char buf[256];
33315 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33317 fprintf (file, "\t.long ");
33318 assemble_name (file, toc_label_name);
33319 need_toc_init = 1;
33320 putc ('-', file);
33321 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33322 assemble_name (file, buf);
33323 putc ('\n', file);
33326 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33327 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33329 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33331 char buf[256];
33333 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33335 fprintf (file, "\t.quad .TOC.-");
33336 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33337 assemble_name (file, buf);
33338 putc ('\n', file);
33341 if (DEFAULT_ABI == ABI_AIX)
33343 const char *desc_name, *orig_name;
33345 orig_name = (*targetm.strip_name_encoding) (name);
33346 desc_name = orig_name;
33347 while (*desc_name == '.')
33348 desc_name++;
33350 if (TREE_PUBLIC (decl))
33351 fprintf (file, "\t.globl %s\n", desc_name);
33353 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33354 fprintf (file, "%s:\n", desc_name);
33355 fprintf (file, "\t.long %s\n", orig_name);
33356 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33357 fputs ("\t.long 0\n", file);
33358 fprintf (file, "\t.previous\n");
33360 ASM_OUTPUT_LABEL (file, name);
33363 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33364 static void
33365 rs6000_elf_file_end (void)
33367 #ifdef HAVE_AS_GNU_ATTRIBUTE
33368 /* ??? The value emitted depends on options active at file end.
33369 Assume anyone using #pragma or attributes that might change
33370 options knows what they are doing. */
33371 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33372 && rs6000_passes_float)
33374 int fp;
33376 if (TARGET_DF_FPR)
33377 fp = 1;
33378 else if (TARGET_SF_FPR)
33379 fp = 3;
33380 else
33381 fp = 2;
33382 if (rs6000_passes_long_double)
33384 if (!TARGET_LONG_DOUBLE_128)
33385 fp |= 2 * 4;
33386 else if (TARGET_IEEEQUAD)
33387 fp |= 3 * 4;
33388 else
33389 fp |= 1 * 4;
33391 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33393 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33395 if (rs6000_passes_vector)
33396 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33397 (TARGET_ALTIVEC_ABI ? 2 : 1));
33398 if (rs6000_returns_struct)
33399 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33400 aix_struct_return ? 2 : 1);
33402 #endif
33403 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33404 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33405 file_end_indicate_exec_stack ();
33406 #endif
33408 if (flag_split_stack)
33409 file_end_indicate_split_stack ();
33411 if (cpu_builtin_p)
33413 /* We have expanded a CPU builtin, so we need to emit a reference to
33414 the special symbol that LIBC uses to declare that it supports
33415 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB. */
33416 switch_to_section (data_section);
33417 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33418 fprintf (asm_out_file, "\t%s %s\n",
33419 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33422 #endif
33424 #if TARGET_XCOFF
33426 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33427 #define HAVE_XCOFF_DWARF_EXTRAS 0
33428 #endif
33430 static enum unwind_info_type
33431 rs6000_xcoff_debug_unwind_info (void)
33433 return UI_NONE;
33436 static void
33437 rs6000_xcoff_asm_output_anchor (rtx symbol)
33439 char buffer[100];
33441 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33442 SYMBOL_REF_BLOCK_OFFSET (symbol));
33443 fprintf (asm_out_file, "%s", SET_ASM_OP);
33444 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33445 fprintf (asm_out_file, ",");
33446 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33447 fprintf (asm_out_file, "\n");
33450 static void
33451 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33453 fputs (GLOBAL_ASM_OP, stream);
33454 RS6000_OUTPUT_BASENAME (stream, name);
33455 putc ('\n', stream);
33458 /* A get_unnamed_decl callback, used for read-only sections. PTR
33459 points to the section string variable. */
33461 static void
33462 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33464 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33465 *(const char *const *) directive,
33466 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33469 /* Likewise for read-write sections. */
33471 static void
33472 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33474 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33475 *(const char *const *) directive,
33476 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33479 static void
33480 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33482 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33483 *(const char *const *) directive,
33484 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33487 /* A get_unnamed_section callback, used for switching to toc_section. */
33489 static void
33490 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33492 if (TARGET_MINIMAL_TOC)
33494 /* toc_section is always selected at least once from
33495 rs6000_xcoff_file_start, so this is guaranteed to
33496 always be defined once and only once in each file. */
33497 if (!toc_initialized)
33499 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33500 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33501 toc_initialized = 1;
33503 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33504 (TARGET_32BIT ? "" : ",3"));
33506 else
33507 fputs ("\t.toc\n", asm_out_file);
33510 /* Implement TARGET_ASM_INIT_SECTIONS. */
33512 static void
33513 rs6000_xcoff_asm_init_sections (void)
33515 read_only_data_section
33516 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33517 &xcoff_read_only_section_name);
33519 private_data_section
33520 = get_unnamed_section (SECTION_WRITE,
33521 rs6000_xcoff_output_readwrite_section_asm_op,
33522 &xcoff_private_data_section_name);
33524 tls_data_section
33525 = get_unnamed_section (SECTION_TLS,
33526 rs6000_xcoff_output_tls_section_asm_op,
33527 &xcoff_tls_data_section_name);
33529 tls_private_data_section
33530 = get_unnamed_section (SECTION_TLS,
33531 rs6000_xcoff_output_tls_section_asm_op,
33532 &xcoff_private_data_section_name);
33534 read_only_private_data_section
33535 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33536 &xcoff_private_data_section_name);
33538 toc_section
33539 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33541 readonly_data_section = read_only_data_section;
33544 static int
33545 rs6000_xcoff_reloc_rw_mask (void)
33547 return 3;
33550 static void
33551 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33552 tree decl ATTRIBUTE_UNUSED)
33554 int smclass;
33555 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33557 if (flags & SECTION_EXCLUDE)
33558 smclass = 4;
33559 else if (flags & SECTION_DEBUG)
33561 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33562 return;
33564 else if (flags & SECTION_CODE)
33565 smclass = 0;
33566 else if (flags & SECTION_TLS)
33567 smclass = 3;
33568 else if (flags & SECTION_WRITE)
33569 smclass = 2;
33570 else
33571 smclass = 1;
33573 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33574 (flags & SECTION_CODE) ? "." : "",
33575 name, suffix[smclass], flags & SECTION_ENTSIZE);
33578 #define IN_NAMED_SECTION(DECL) \
33579 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33580 && DECL_SECTION_NAME (DECL) != NULL)
33582 static section *
33583 rs6000_xcoff_select_section (tree decl, int reloc,
33584 unsigned HOST_WIDE_INT align)
33586 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33587 a named section. */
33588 if (align > BIGGEST_ALIGNMENT)
33590 resolve_unique_section (decl, reloc, true);
33591 if (IN_NAMED_SECTION (decl))
33592 return get_named_section (decl, NULL, reloc);
33595 if (decl_readonly_section (decl, reloc))
33597 if (TREE_PUBLIC (decl))
33598 return read_only_data_section;
33599 else
33600 return read_only_private_data_section;
33602 else
33604 #if HAVE_AS_TLS
33605 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33607 if (TREE_PUBLIC (decl))
33608 return tls_data_section;
33609 else if (bss_initializer_p (decl))
33611 /* Convert to COMMON to emit in BSS. */
33612 DECL_COMMON (decl) = 1;
33613 return tls_comm_section;
33615 else
33616 return tls_private_data_section;
33618 else
33619 #endif
33620 if (TREE_PUBLIC (decl))
33621 return data_section;
33622 else
33623 return private_data_section;
33627 static void
33628 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33630 const char *name;
33632 /* Use select_section for private data and uninitialized data with
33633 alignment <= BIGGEST_ALIGNMENT. */
33634 if (!TREE_PUBLIC (decl)
33635 || DECL_COMMON (decl)
33636 || (DECL_INITIAL (decl) == NULL_TREE
33637 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33638 || DECL_INITIAL (decl) == error_mark_node
33639 || (flag_zero_initialized_in_bss
33640 && initializer_zerop (DECL_INITIAL (decl))))
33641 return;
33643 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33644 name = (*targetm.strip_name_encoding) (name);
33645 set_decl_section_name (decl, name);
33648 /* Select section for constant in constant pool.
33650 On RS/6000, all constants are in the private read-only data area.
33651 However, if this is being placed in the TOC it must be output as a
33652 toc entry. */
33654 static section *
33655 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33656 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33658 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33659 return toc_section;
33660 else
33661 return read_only_private_data_section;
33664 /* Remove any trailing [DS] or the like from the symbol name. */
33666 static const char *
33667 rs6000_xcoff_strip_name_encoding (const char *name)
33669 size_t len;
33670 if (*name == '*')
33671 name++;
33672 len = strlen (name);
33673 if (name[len - 1] == ']')
33674 return ggc_alloc_string (name, len - 4);
33675 else
33676 return name;
33679 /* Section attributes. AIX is always PIC. */
33681 static unsigned int
33682 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33684 unsigned int align;
33685 unsigned int flags = default_section_type_flags (decl, name, reloc);
33687 /* Align code, and anything without a DECL, to at least MIN_UNITS_PER_WORD. */
33688 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33689 align = MIN_UNITS_PER_WORD;
33690 else
33691 /* Increase alignment of large objects if not already stricter. */
33692 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33693 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33694 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33696 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
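/* For instance, assuming UNITS_PER_FP_WORD is 8: a public 64-byte
   array of doubles is larger than a word, so align = MAX (8, 8) = 8,
   and exact_log2 stores 3 in the SECTION_ENTSIZE bits of the returned
   flags.  */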
33699 /* Output at beginning of assembler file.
33701 Initialize the section names for the RS/6000 at this point.
33703 Specify filename, including full path, to assembler.
33705 We want to go into the TOC section so at least one .toc will be emitted.
33706 Also, in order to output proper .bs/.es pairs, we need at least one static
33707 [RW] section emitted.
33709 Finally, declare mcount when profiling to make the assembler happy. */
33711 static void
33712 rs6000_xcoff_file_start (void)
33714 rs6000_gen_section_name (&xcoff_bss_section_name,
33715 main_input_filename, ".bss_");
33716 rs6000_gen_section_name (&xcoff_private_data_section_name,
33717 main_input_filename, ".rw_");
33718 rs6000_gen_section_name (&xcoff_read_only_section_name,
33719 main_input_filename, ".ro_");
33720 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33721 main_input_filename, ".tls_");
33722 rs6000_gen_section_name (&xcoff_tbss_section_name,
33723 main_input_filename, ".tbss_[UL]");
33725 fputs ("\t.file\t", asm_out_file);
33726 output_quoted_string (asm_out_file, main_input_filename);
33727 fputc ('\n', asm_out_file);
33728 if (write_symbols != NO_DEBUG)
33729 switch_to_section (private_data_section);
33730 switch_to_section (toc_section);
33731 switch_to_section (text_section);
33732 if (profile_flag)
33733 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33734 rs6000_file_start ();
33737 /* Output at end of assembler file.
33738 On the RS/6000, referencing data should automatically pull in text. */
33740 static void
33741 rs6000_xcoff_file_end (void)
33743 switch_to_section (text_section);
33744 fputs ("_section_.text:\n", asm_out_file);
33745 switch_to_section (data_section);
33746 fputs (TARGET_32BIT
33747 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33748 asm_out_file);
33751 struct declare_alias_data
33753 FILE *file;
33754 bool function_descriptor;
33757 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
33759 static bool
33760 rs6000_declare_alias (struct symtab_node *n, void *d)
33762 struct declare_alias_data *data = (struct declare_alias_data *)d;
33763 /* Main symbol is output specially, because varasm machinery does part of
33764 the job for us - we do not need to declare .globl/lglobs and such. */
33765 if (!n->alias || n->weakref)
33766 return false;
33768 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33769 return false;
33771 /* Prevent assemble_alias from trying to use .set pseudo operation
33772 that does not behave as expected by the middle-end. */
33773 TREE_ASM_WRITTEN (n->decl) = true;
33775 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33776 char *buffer = (char *) alloca (strlen (name) + 2);
33777 char *p;
33778 int dollar_inside = 0;
33780 strcpy (buffer, name);
33781 p = strchr (buffer, '$');
33782 while (p) {
33783 *p = '_';
33784 dollar_inside++;
33785 p = strchr (p + 1, '$');
33787 if (TREE_PUBLIC (n->decl))
33789 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33791 if (dollar_inside) {
33792 if (data->function_descriptor)
33793 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33794 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33796 if (data->function_descriptor)
33798 fputs ("\t.globl .", data->file);
33799 RS6000_OUTPUT_BASENAME (data->file, buffer);
33800 putc ('\n', data->file);
33802 fputs ("\t.globl ", data->file);
33803 RS6000_OUTPUT_BASENAME (data->file, buffer);
33804 putc ('\n', data->file);
33806 #ifdef ASM_WEAKEN_DECL
33807 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33808 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33809 #endif
33811 else
33813 if (dollar_inside)
33815 if (data->function_descriptor)
33816 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33817 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33819 if (data->function_descriptor)
33821 fputs ("\t.lglobl .", data->file);
33822 RS6000_OUTPUT_BASENAME (data->file, buffer);
33823 putc ('\n', data->file);
33825 fputs ("\t.lglobl ", data->file);
33826 RS6000_OUTPUT_BASENAME (data->file, buffer);
33827 putc ('\n', data->file);
33829 if (data->function_descriptor)
33830 fputs (".", data->file);
33831 RS6000_OUTPUT_BASENAME (data->file, buffer);
33832 fputs (":\n", data->file);
33833 return false;
33837 #ifdef HAVE_GAS_HIDDEN
33838 /* Helper function to calculate visibility of a DECL
33839 and return the value as a const string. */
33841 static const char *
33842 rs6000_xcoff_visibility (tree decl)
33844 static const char * const visibility_types[] = {
33845 "", ",protected", ",hidden", ",internal"
33848 enum symbol_visibility vis = DECL_VISIBILITY (decl);
33850 if (TREE_CODE (decl) == FUNCTION_DECL
33851 && cgraph_node::get (decl)
33852 && cgraph_node::get (decl)->instrumentation_clone
33853 && cgraph_node::get (decl)->instrumented_version)
33854 vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);
33856 return visibility_types[vis];
33858 #endif
33861 /* This macro produces the initial definition of a function name.
33862 On the RS/6000, we need to place an extra '.' in the function name and
33863 output the function descriptor.
33864 Dollar signs are converted to underscores.
33866 The csect for the function will have already been created when
33867 text_section was selected. We do have to go back to that csect, however.
33869 The third and fourth parameters to the .function pseudo-op (16 and 044)
33870 are placeholders which no longer have any use.
33872 Because AIX assembler's .set command has unexpected semantics, we output
33873 all aliases as alternative labels in front of the definition. */
33875 void
33876 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33878 char *buffer = (char *) alloca (strlen (name) + 1);
33879 char *p;
33880 int dollar_inside = 0;
33881 struct declare_alias_data data = {file, false};
33883 strcpy (buffer, name);
33884 p = strchr (buffer, '$');
33885 while (p) {
33886 *p = '_';
33887 dollar_inside++;
33888 p = strchr (p + 1, '$');
33890 if (TREE_PUBLIC (decl))
33892 if (!RS6000_WEAK || !DECL_WEAK (decl))
33894 if (dollar_inside) {
33895 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33896 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33898 fputs ("\t.globl .", file);
33899 RS6000_OUTPUT_BASENAME (file, buffer);
33900 #ifdef HAVE_GAS_HIDDEN
33901 fputs (rs6000_xcoff_visibility (decl), file);
33902 #endif
33903 putc ('\n', file);
33906 else
33908 if (dollar_inside) {
33909 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33910 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33912 fputs ("\t.lglobl .", file);
33913 RS6000_OUTPUT_BASENAME (file, buffer);
33914 putc ('\n', file);
33916 fputs ("\t.csect ", file);
33917 RS6000_OUTPUT_BASENAME (file, buffer);
33918 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33919 RS6000_OUTPUT_BASENAME (file, buffer);
33920 fputs (":\n", file);
33921 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33922 &data, true);
33923 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
33924 RS6000_OUTPUT_BASENAME (file, buffer);
33925 fputs (", TOC[tc0], 0\n", file);
33926 in_section = NULL;
33927 switch_to_section (function_section (decl));
33928 putc ('.', file);
33929 RS6000_OUTPUT_BASENAME (file, buffer);
33930 fputs (":\n", file);
33931 data.function_descriptor = true;
33932 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33933 &data, true);
33934 if (!DECL_IGNORED_P (decl))
33936 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33937 xcoffout_declare_function (file, decl, buffer);
33938 else if (write_symbols == DWARF2_DEBUG)
33940 name = (*targetm.strip_name_encoding) (name);
33941 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
33944 return;
33948 /* Output assembly language to globalize a symbol from a DECL,
33949 possibly with visibility. */
33951 void
33952 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
33954 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
33955 fputs (GLOBAL_ASM_OP, stream);
33956 RS6000_OUTPUT_BASENAME (stream, name);
33957 #ifdef HAVE_GAS_HIDDEN
33958 fputs (rs6000_xcoff_visibility (decl), stream);
33959 #endif
33960 putc ('\n', stream);
33963 /* Output assembly language to define a symbol as COMMON from a DECL,
33964 possibly with visibility. */
33966 void
33967 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
33968 tree decl ATTRIBUTE_UNUSED,
33969 const char *name,
33970 unsigned HOST_WIDE_INT size,
33971 unsigned HOST_WIDE_INT align)
33973 unsigned HOST_WIDE_INT align2 = 2;
33975 if (align > 32)
33976 align2 = floor_log2 (align / BITS_PER_UNIT);
33977 else if (size > 4)
33978 align2 = 3;
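/* The alignment operand of .comm here is a log2 byte count: e.g. a
   request for 128-bit (16-byte) alignment emits ",4", an object
   larger than 4 bytes gets ",3" (8 bytes), and anything else the
   default ",2" (4 bytes).  */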
33980 fputs (COMMON_ASM_OP, stream);
33981 RS6000_OUTPUT_BASENAME (stream, name);
33983 fprintf (stream,
33984 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
33985 size, align2);
33987 #ifdef HAVE_GAS_HIDDEN
33988 fputs (rs6000_xcoff_visibility (decl), stream);
33989 #endif
33990 putc ('\n', stream);
33993 /* This macro produces the initial definition of an object (variable) name.
33994 Because AIX assembler's .set command has unexpected semantics, we output
33995 all aliases as alternative labels in front of the definition. */
33997 void
33998 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34000 struct declare_alias_data data = {file, false};
34001 RS6000_OUTPUT_BASENAME (file, name);
34002 fputs (":\n", file);
34003 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34004 &data, true);
34007 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
34009 void
34010 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34012 fputs (integer_asm_op (size, FALSE), file);
34013 assemble_name (file, label);
34014 fputs ("-$", file);
34017 /* Output a symbol offset relative to the dbase for the current object.
34018 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34019 signed offsets.
34021 __gcc_unwind_dbase is embedded in all executables/libraries through
34022 libgcc/config/rs6000/crtdbase.S. */
34024 void
34025 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34027 fputs (integer_asm_op (size, FALSE), file);
34028 assemble_name (file, label);
34029 fputs("-__gcc_unwind_dbase", file);
34032 #ifdef HAVE_AS_TLS
34033 static void
34034 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34036 rtx symbol;
34037 int flags;
34038 const char *symname;
34040 default_encode_section_info (decl, rtl, first);
34042 /* Careful not to prod global register variables. */
34043 if (!MEM_P (rtl))
34044 return;
34045 symbol = XEXP (rtl, 0);
34046 if (GET_CODE (symbol) != SYMBOL_REF)
34047 return;
34049 flags = SYMBOL_REF_FLAGS (symbol);
34051 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34052 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34054 SYMBOL_REF_FLAGS (symbol) = flags;
34056 /* Append mapping class to extern decls. */
34057 symname = XSTR (symbol, 0);
34058 if (decl /* sync condition with assemble_external () */
34059 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34060 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34061 || TREE_CODE (decl) == FUNCTION_DECL)
34062 && symname[strlen (symname) - 1] != ']')
34064 char *newname = (char *) alloca (strlen (symname) + 5);
34065 strcpy (newname, symname);
34066 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34067 ? "[DS]" : "[UA]"));
34068 XSTR (symbol, 0) = ggc_strdup (newname);
34071 #endif /* HAVE_AS_TLS */
34072 #endif /* TARGET_XCOFF */
34074 void
34075 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34076 const char *name, const char *val)
34078 fputs ("\t.weak\t", stream);
34079 RS6000_OUTPUT_BASENAME (stream, name);
34080 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34081 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34083 if (TARGET_XCOFF)
34084 fputs ("[DS]", stream);
34085 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34086 if (TARGET_XCOFF)
34087 fputs (rs6000_xcoff_visibility (decl), stream);
34088 #endif
34089 fputs ("\n\t.weak\t.", stream);
34090 RS6000_OUTPUT_BASENAME (stream, name);
34092 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34093 if (TARGET_XCOFF)
34094 fputs (rs6000_xcoff_visibility (decl), stream);
34095 #endif
34096 fputc ('\n', stream);
34097 if (val)
34099 #ifdef ASM_OUTPUT_DEF
34100 ASM_OUTPUT_DEF (stream, name, val);
34101 #endif
34102 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34103 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34105 fputs ("\t.set\t.", stream);
34106 RS6000_OUTPUT_BASENAME (stream, name);
34107 fputs (",.", stream);
34108 RS6000_OUTPUT_BASENAME (stream, val);
34109 fputc ('\n', stream);
34115 /* Return true if INSN should not be copied. */
34117 static bool
34118 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34120 return recog_memoized (insn) >= 0
34121 && get_attr_cannot_copy (insn);
34124 /* Compute a (partial) cost for rtx X. Return true if the complete
34125 cost has been computed, and false if subexpressions should be
34126 scanned. In either case, *TOTAL contains the cost result. */
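/* As a small example: in (set (reg) (plus (reg) (const_int 16))) the
   CONST_INT satisfies constraint "I" (a signed 16-bit value) and the
   CONST_INT case below reports it as free, whereas an addend like
   0x12345 only matches reg_or_add_cint_operand and is charged an
   extra COSTS_N_INSNS (1) for the addis/addi synthesis.  */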
34128 static bool
34129 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34130 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34132 int code = GET_CODE (x);
34134 switch (code)
34136 /* On the RS/6000, if it is valid in the insn, it is free. */
34137 case CONST_INT:
34138 if (((outer_code == SET
34139 || outer_code == PLUS
34140 || outer_code == MINUS)
34141 && (satisfies_constraint_I (x)
34142 || satisfies_constraint_L (x)))
34143 || (outer_code == AND
34144 && (satisfies_constraint_K (x)
34145 || (mode == SImode
34146 ? satisfies_constraint_L (x)
34147 : satisfies_constraint_J (x))))
34148 || ((outer_code == IOR || outer_code == XOR)
34149 && (satisfies_constraint_K (x)
34150 || (mode == SImode
34151 ? satisfies_constraint_L (x)
34152 : satisfies_constraint_J (x))))
34153 || outer_code == ASHIFT
34154 || outer_code == ASHIFTRT
34155 || outer_code == LSHIFTRT
34156 || outer_code == ROTATE
34157 || outer_code == ROTATERT
34158 || outer_code == ZERO_EXTRACT
34159 || (outer_code == MULT
34160 && satisfies_constraint_I (x))
34161 || ((outer_code == DIV || outer_code == UDIV
34162 || outer_code == MOD || outer_code == UMOD)
34163 && exact_log2 (INTVAL (x)) >= 0)
34164 || (outer_code == COMPARE
34165 && (satisfies_constraint_I (x)
34166 || satisfies_constraint_K (x)))
34167 || ((outer_code == EQ || outer_code == NE)
34168 && (satisfies_constraint_I (x)
34169 || satisfies_constraint_K (x)
34170 || (mode == SImode
34171 ? satisfies_constraint_L (x)
34172 : satisfies_constraint_J (x))))
34173 || (outer_code == GTU
34174 && satisfies_constraint_I (x))
34175 || (outer_code == LTU
34176 && satisfies_constraint_P (x)))
34178 *total = 0;
34179 return true;
34181 else if ((outer_code == PLUS
34182 && reg_or_add_cint_operand (x, VOIDmode))
34183 || (outer_code == MINUS
34184 && reg_or_sub_cint_operand (x, VOIDmode))
34185 || ((outer_code == SET
34186 || outer_code == IOR
34187 || outer_code == XOR)
34188 && (INTVAL (x)
34189 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34191 *total = COSTS_N_INSNS (1);
34192 return true;
34194 /* FALLTHRU */
34196 case CONST_DOUBLE:
34197 case CONST_WIDE_INT:
34198 case CONST:
34199 case HIGH:
34200 case SYMBOL_REF:
34201 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34202 return true;
34204 case MEM:
34205 /* When optimizing for size, MEM should be slightly more expensive
34206 than generating the address, e.g., (plus (reg) (const)).
34207 L1 cache latency is about two instructions. */
34208 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34209 if (SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (x)))
34210 *total += COSTS_N_INSNS (100);
34211 return true;
34213 case LABEL_REF:
34214 *total = 0;
34215 return true;
34217 case PLUS:
34218 case MINUS:
34219 if (FLOAT_MODE_P (mode))
34220 *total = rs6000_cost->fp;
34221 else
34222 *total = COSTS_N_INSNS (1);
34223 return false;
34225 case MULT:
34226 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34227 && satisfies_constraint_I (XEXP (x, 1)))
34229 if (INTVAL (XEXP (x, 1)) >= -256
34230 && INTVAL (XEXP (x, 1)) <= 255)
34231 *total = rs6000_cost->mulsi_const9;
34232 else
34233 *total = rs6000_cost->mulsi_const;
34235 else if (mode == SFmode)
34236 *total = rs6000_cost->fp;
34237 else if (FLOAT_MODE_P (mode))
34238 *total = rs6000_cost->dmul;
34239 else if (mode == DImode)
34240 *total = rs6000_cost->muldi;
34241 else
34242 *total = rs6000_cost->mulsi;
34243 return false;
34245 case FMA:
34246 if (mode == SFmode)
34247 *total = rs6000_cost->fp;
34248 else
34249 *total = rs6000_cost->dmul;
34250 break;
34252 case DIV:
34253 case MOD:
34254 if (FLOAT_MODE_P (mode))
34256 *total = mode == DFmode ? rs6000_cost->ddiv
34257 : rs6000_cost->sdiv;
34258 return false;
34260 /* FALLTHRU */
34262 case UDIV:
34263 case UMOD:
34264 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34265 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34267 if (code == DIV || code == MOD)
34268 /* Shift, addze */
34269 *total = COSTS_N_INSNS (2);
34270 else
34271 /* Shift */
34272 *total = COSTS_N_INSNS (1);
34274 else
34276 if (GET_MODE (XEXP (x, 1)) == DImode)
34277 *total = rs6000_cost->divdi;
34278 else
34279 *total = rs6000_cost->divsi;
34281 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34282 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34283 *total += COSTS_N_INSNS (2);
34284 return false;
34286 case CTZ:
34287 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34288 return false;
34290 case FFS:
34291 *total = COSTS_N_INSNS (4);
34292 return false;
34294 case POPCOUNT:
34295 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34296 return false;
34298 case PARITY:
34299 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34300 return false;
34302 case NOT:
34303 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34304 *total = 0;
34305 else
34306 *total = COSTS_N_INSNS (1);
34307 return false;
34309 case AND:
34310 if (CONST_INT_P (XEXP (x, 1)))
34312 rtx left = XEXP (x, 0);
34313 rtx_code left_code = GET_CODE (left);
34315 /* rotate-and-mask: 1 insn. */
34316 if ((left_code == ROTATE
34317 || left_code == ASHIFT
34318 || left_code == LSHIFTRT)
34319 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34321 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34322 if (!CONST_INT_P (XEXP (left, 1)))
34323 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34324 *total += COSTS_N_INSNS (1);
34325 return true;
34328 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34329 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34330 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34331 || (val & 0xffff) == val
34332 || (val & 0xffff0000) == val
34333 || ((val & 0xffff) == 0 && mode == SImode))
34335 *total = rtx_cost (left, mode, AND, 0, speed);
34336 *total += COSTS_N_INSNS (1);
34337 return true;
34340 /* 2 insns. */
34341 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34343 *total = rtx_cost (left, mode, AND, 0, speed);
34344 *total += COSTS_N_INSNS (2);
34345 return true;
34349 *total = COSTS_N_INSNS (1);
34350 return false;
34352 case IOR:
34353 /* FIXME */
34354 *total = COSTS_N_INSNS (1);
34355 return true;
34357 case CLZ:
34358 case XOR:
34359 case ZERO_EXTRACT:
34360 *total = COSTS_N_INSNS (1);
34361 return false;
34363 case ASHIFT:
34364 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34365 the sign extend and shift separately within the insn. */
34366 if (TARGET_EXTSWSLI && mode == DImode
34367 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34368 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34370 *total = 0;
34371 return false;
34373 /* fall through */
34375 case ASHIFTRT:
34376 case LSHIFTRT:
34377 case ROTATE:
34378 case ROTATERT:
34379 /* Handle mul_highpart. */
34380 if (outer_code == TRUNCATE
34381 && GET_CODE (XEXP (x, 0)) == MULT)
34383 if (mode == DImode)
34384 *total = rs6000_cost->muldi;
34385 else
34386 *total = rs6000_cost->mulsi;
34387 return true;
34389 else if (outer_code == AND)
34390 *total = 0;
34391 else
34392 *total = COSTS_N_INSNS (1);
34393 return false;
34395 case SIGN_EXTEND:
34396 case ZERO_EXTEND:
34397 if (GET_CODE (XEXP (x, 0)) == MEM)
34398 *total = 0;
34399 else
34400 *total = COSTS_N_INSNS (1);
34401 return false;
34403 case COMPARE:
34404 case NEG:
34405 case ABS:
34406 if (!FLOAT_MODE_P (mode))
34408 *total = COSTS_N_INSNS (1);
34409 return false;
34411 /* FALLTHRU */
34413 case FLOAT:
34414 case UNSIGNED_FLOAT:
34415 case FIX:
34416 case UNSIGNED_FIX:
34417 case FLOAT_TRUNCATE:
34418 *total = rs6000_cost->fp;
34419 return false;
34421 case FLOAT_EXTEND:
34422 if (mode == DFmode)
34423 *total = rs6000_cost->sfdf_convert;
34424 else
34425 *total = rs6000_cost->fp;
34426 return false;
34428 case UNSPEC:
34429 switch (XINT (x, 1))
34431 case UNSPEC_FRSP:
34432 *total = rs6000_cost->fp;
34433 return true;
34435 default:
34436 break;
34438 break;
34440 case CALL:
34441 case IF_THEN_ELSE:
34442 if (!speed)
34444 *total = COSTS_N_INSNS (1);
34445 return true;
34447 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34449 *total = rs6000_cost->fp;
34450 return false;
34452 break;
34454 case NE:
34455 case EQ:
34456 case GTU:
34457 case LTU:
34458 /* Carry bit requires mode == Pmode.
34459 NEG or PLUS already counted so only add one. */
34460 if (mode == Pmode
34461 && (outer_code == NEG || outer_code == PLUS))
34463 *total = COSTS_N_INSNS (1);
34464 return true;
34466 if (outer_code == SET)
34468 if (XEXP (x, 1) == const0_rtx)
34470 if (TARGET_ISEL && !TARGET_MFCRF)
34471 *total = COSTS_N_INSNS (8);
34472 else
34473 *total = COSTS_N_INSNS (2);
34474 return true;
34476 else
34478 *total = COSTS_N_INSNS (3);
34479 return false;
34482 /* FALLTHRU */
34484 case GT:
34485 case LT:
34486 case UNORDERED:
34487 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
34489 if (TARGET_ISEL && !TARGET_MFCRF)
34490 *total = COSTS_N_INSNS (8);
34491 else
34492 *total = COSTS_N_INSNS (2);
34493 return true;
34495 /* CC COMPARE. */
34496 if (outer_code == COMPARE)
34498 *total = 0;
34499 return true;
34501 break;
34503 default:
34504 break;
34507 return false;
34510 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34512 static bool
34513 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34514 int opno, int *total, bool speed)
34516 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34518 fprintf (stderr,
34519 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34520 "opno = %d, total = %d, speed = %s, x:\n",
34521 ret ? "complete" : "scan inner",
34522 GET_MODE_NAME (mode),
34523 GET_RTX_NAME (outer_code),
34524 opno,
34525 *total,
34526 speed ? "true" : "false");
34528 debug_rtx (x);
34530 return ret;
34533 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34535 static int
34536 rs6000_debug_address_cost (rtx x, machine_mode mode,
34537 addr_space_t as, bool speed)
34539 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34541 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34542 ret, speed ? "true" : "false");
34543 debug_rtx (x);
34545 return ret;
34549 /* A C expression returning the cost of moving data from a register of
34550 class FROM to one of class TO. */
34552 static int
34553 rs6000_register_move_cost (machine_mode mode,
34554 reg_class_t from, reg_class_t to)
34556 int ret;
34558 if (TARGET_DEBUG_COST)
34559 dbg_cost_ctrl++;
34561 /* Moves from/to GENERAL_REGS. */
34562 if (reg_classes_intersect_p (to, GENERAL_REGS)
34563 || reg_classes_intersect_p (from, GENERAL_REGS))
34565 reg_class_t rclass = from;
34567 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34568 rclass = to;
34570 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34571 ret = (rs6000_memory_move_cost (mode, rclass, false)
34572 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34574 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34575 shift. */
34576 else if (rclass == CR_REGS)
34577 ret = 4;
34579 /* For those processors that have slow LR/CTR moves, make them more
34580 expensive than memory in order to bias spills to memory. */
34581 else if ((rs6000_cpu == PROCESSOR_POWER6
34582 || rs6000_cpu == PROCESSOR_POWER7
34583 || rs6000_cpu == PROCESSOR_POWER8
34584 || rs6000_cpu == PROCESSOR_POWER9)
34585 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34586 ret = 6 * hard_regno_nregs[0][mode];
34588 else
34589 /* A move will cost one instruction per GPR moved. */
34590 ret = 2 * hard_regno_nregs[0][mode];
34593 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34594 else if (VECTOR_MEM_VSX_P (mode)
34595 && reg_classes_intersect_p (to, VSX_REGS)
34596 && reg_classes_intersect_p (from, VSX_REGS))
34597 ret = 2 * hard_regno_nregs[FIRST_FPR_REGNO][mode];
34599 /* Moving between two similar registers is just one instruction. */
34600 else if (reg_classes_intersect_p (to, from))
34601 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34603 /* Everything else has to go through GENERAL_REGS. */
34604 else
34605 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34606 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34608 if (TARGET_DEBUG_COST)
34610 if (dbg_cost_ctrl == 1)
34611 fprintf (stderr,
34612 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34613 ret, GET_MODE_NAME (mode), reg_class_names[from],
34614 reg_class_names[to]);
34615 dbg_cost_ctrl--;
34618 return ret;
34621 /* A C expression returning the cost of moving data of MODE from a register to
34622 or from memory. */
34624 static int
34625 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34626 bool in ATTRIBUTE_UNUSED)
34628 int ret;
34630 if (TARGET_DEBUG_COST)
34631 dbg_cost_ctrl++;
34633 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34634 ret = 4 * hard_regno_nregs[0][mode];
34635 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34636 || reg_classes_intersect_p (rclass, VSX_REGS)))
34637 ret = 4 * hard_regno_nregs[32][mode];
34638 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34639 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
34640 else
34641 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34643 if (TARGET_DEBUG_COST)
34645 if (dbg_cost_ctrl == 1)
34646 fprintf (stderr,
34647 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34648 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34649 dbg_cost_ctrl--;
34652 return ret;
34655 /* Returns a code for a target-specific builtin that implements
34656 reciprocal of the function, or NULL_TREE if not available. */
34658 static tree
34659 rs6000_builtin_reciprocal (tree fndecl)
34661 switch (DECL_FUNCTION_CODE (fndecl))
34663 case VSX_BUILTIN_XVSQRTDP:
34664 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34665 return NULL_TREE;
34667 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34669 case VSX_BUILTIN_XVSQRTSP:
34670 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34671 return NULL_TREE;
34673 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34675 default:
34676 return NULL_TREE;
34680 /* Load up a constant. If the mode is a vector mode, splat the value across
34681 all of the vector elements. */
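/* E.g., asking for dconst1 in V4SFmode returns a register holding
   { 1.0, 1.0, 1.0, 1.0 }, built from a four-element PARALLEL by
   rs6000_expand_vector_init.  */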
34683 static rtx
34684 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34686 rtx reg;
34688 if (mode == SFmode || mode == DFmode)
34690 rtx d = const_double_from_real_value (dconst, mode);
34691 reg = force_reg (mode, d);
34693 else if (mode == V4SFmode)
34695 rtx d = const_double_from_real_value (dconst, SFmode);
34696 rtvec v = gen_rtvec (4, d, d, d, d);
34697 reg = gen_reg_rtx (mode);
34698 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34700 else if (mode == V2DFmode)
34702 rtx d = const_double_from_real_value (dconst, DFmode);
34703 rtvec v = gen_rtvec (2, d, d);
34704 reg = gen_reg_rtx (mode);
34705 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34707 else
34708 gcc_unreachable ();
34710 return reg;
34713 /* Generate an FMA instruction. */
34715 static void
34716 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34718 machine_mode mode = GET_MODE (target);
34719 rtx dst;
34721 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34722 gcc_assert (dst != NULL);
34724 if (dst != target)
34725 emit_move_insn (target, dst);
34728 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34730 static void
34731 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34733 machine_mode mode = GET_MODE (dst);
34734 rtx r;
34736 /* This is a tad more complicated, since the fnma_optab is for
34737 a different expression: fma(-m1, m2, a), which is the same
34738 thing except in the case of signed zeros.
34740 Fortunately we know that if FMA is supported, then FNMSUB is
34741 also supported in the ISA. Just expand it directly. */
34743 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34745 r = gen_rtx_NEG (mode, a);
34746 r = gen_rtx_FMA (mode, m1, m2, r);
34747 r = gen_rtx_NEG (mode, r);
34748 emit_insn (gen_rtx_SET (dst, r));
34751 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34752 add a reg_note saying that this was a division. Support both scalar and
34753 vector divide. Assumes no trapping math and finite arguments. */
34755 void
34756 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34758 machine_mode mode = GET_MODE (dst);
34759 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34760 int i;
34762 /* Low precision estimates guarantee 5 bits of accuracy. High
34763 precision estimates guarantee 14 bits of accuracy. SFmode
34764 requires 23 bits of accuracy. DFmode requires 52 bits of
34765 accuracy. Each pass at least doubles the accuracy, leading
34766 to the following. */
34767 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34768 if (mode == DFmode || mode == V2DFmode)
34769 passes++;
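/* Worked out: a low-precision SFmode estimate goes 5 -> 10 -> 20 -> 40
   bits in three passes (>= 23 needed); DFmode's extra pass reaches 80
   (>= 52).  With TARGET_RECIP_PRECISION the 14-bit estimate needs only
   28 and 56 bits respectively.  */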
34771 enum insn_code code = optab_handler (smul_optab, mode);
34772 insn_gen_fn gen_mul = GEN_FCN (code);
34774 gcc_assert (code != CODE_FOR_nothing);
34776 one = rs6000_load_constant_and_splat (mode, dconst1);
34778 /* x0 = 1./d estimate */
34779 x0 = gen_reg_rtx (mode);
34780 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34781 UNSPEC_FRES)));
34783 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
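/* Equivalently, with the error e_i = 1 - d * x_i the step is
   x_(i+1) = x_i + e_i * x_i, and the error squares every pass
   (e_(i+1) = e_i * e_i), which is exactly the nmsub/madd/mul
   sequence below.  */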
34784 if (passes > 1) {
34786 /* e0 = 1. - d * x0 */
34787 e0 = gen_reg_rtx (mode);
34788 rs6000_emit_nmsub (e0, d, x0, one);
34790 /* x1 = x0 + e0 * x0 */
34791 x1 = gen_reg_rtx (mode);
34792 rs6000_emit_madd (x1, e0, x0, x0);
34794 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34795 ++i, xprev = xnext, eprev = enext) {
34797 /* enext = eprev * eprev */
34798 enext = gen_reg_rtx (mode);
34799 emit_insn (gen_mul (enext, eprev, eprev));
34801 /* xnext = xprev + enext * xprev */
34802 xnext = gen_reg_rtx (mode);
34803 rs6000_emit_madd (xnext, enext, xprev, xprev);
34806 } else
34807 xprev = x0;
34809 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34811 /* u = n * xprev */
34812 u = gen_reg_rtx (mode);
34813 emit_insn (gen_mul (u, n, xprev));
34815 /* v = n - (d * u) */
34816 v = gen_reg_rtx (mode);
34817 rs6000_emit_nmsub (v, d, u, n);
34819 /* dst = (v * xprev) + u */
34820 rs6000_emit_madd (dst, v, xprev, u);
34822 if (note_p)
34823 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
34826 /* Goldschmidt's Algorithm for single/double-precision floating point
34827 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
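/* Roughly: from the initial estimate e ~= 1/sqrt(src) the code keeps
   g ~= sqrt(src) and h ~= 1/(2*sqrt(src)), refining both with the
   residual t = 1/2 - g*h via g' = g + g*t and h' = h + h*t; the
   result is g for sqrt, or h doubled (h + h) for rsqrt.  */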
34829 void
34830 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34832 machine_mode mode = GET_MODE (src);
34833 rtx e = gen_reg_rtx (mode);
34834 rtx g = gen_reg_rtx (mode);
34835 rtx h = gen_reg_rtx (mode);
34837 /* Low precision estimates guarantee 5 bits of accuracy. High
34838 precision estimates guarantee 14 bits of accuracy. SFmode
34839 requires 23 bits of accuracy. DFmode requires 52 bits of
34840 accuracy. Each pass at least doubles the accuracy, leading
34841 to the following. */
34842 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34843 if (mode == DFmode || mode == V2DFmode)
34844 passes++;
34846 int i;
34847 rtx mhalf;
34848 enum insn_code code = optab_handler (smul_optab, mode);
34849 insn_gen_fn gen_mul = GEN_FCN (code);
34851 gcc_assert (code != CODE_FOR_nothing);
34853 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34855 /* e = rsqrt estimate */
34856 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
34857 UNSPEC_RSQRT)));
34859 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
34860 if (!recip)
34862 rtx zero = force_reg (mode, CONST0_RTX (mode));
34864 if (mode == SFmode)
34866 rtx target = emit_conditional_move (e, GT, src, zero, mode,
34867 e, zero, mode, 0);
34868 if (target != e)
34869 emit_move_insn (e, target);
34871 else
34873 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
34874 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
34878 /* g = sqrt estimate. */
34879 emit_insn (gen_mul (g, e, src));
34880 /* h = 1/(2*sqrt) estimate. */
34881 emit_insn (gen_mul (h, e, mhalf));
34883 if (recip)
34885 if (passes == 1)
34887 rtx t = gen_reg_rtx (mode);
34888 rs6000_emit_nmsub (t, g, h, mhalf);
34889 /* Apply correction directly to 1/rsqrt estimate. */
34890 rs6000_emit_madd (dst, e, t, e);
34892 else
34894 for (i = 0; i < passes; i++)
34896 rtx t1 = gen_reg_rtx (mode);
34897 rtx g1 = gen_reg_rtx (mode);
34898 rtx h1 = gen_reg_rtx (mode);
34900 rs6000_emit_nmsub (t1, g, h, mhalf);
34901 rs6000_emit_madd (g1, g, t1, g);
34902 rs6000_emit_madd (h1, h, t1, h);
34904 g = g1;
34905 h = h1;
34907 /* Multiply by 2 for 1/rsqrt. */
34908 emit_insn (gen_add3_insn (dst, h, h));
34911 else
34913 rtx t = gen_reg_rtx (mode);
34914 rs6000_emit_nmsub (t, g, h, mhalf);
34915 rs6000_emit_madd (dst, g, t, g);
34918 return;
34921 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
34922 (Power7) targets. DST is the target, and SRC is the argument operand. */
34924 void
34925 rs6000_emit_popcount (rtx dst, rtx src)
34927 machine_mode mode = GET_MODE (dst);
34928 rtx tmp1, tmp2;
34930 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
34931 if (TARGET_POPCNTD)
34933 if (mode == SImode)
34934 emit_insn (gen_popcntdsi2 (dst, src));
34935 else
34936 emit_insn (gen_popcntddi2 (dst, src));
34937 return;
34940 tmp1 = gen_reg_rtx (mode);
34942 if (mode == SImode)
34944 emit_insn (gen_popcntbsi2 (tmp1, src));
34945 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
34946 NULL_RTX, 0);
34947 tmp2 = force_reg (SImode, tmp2);
34948 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
34950 else
34952 emit_insn (gen_popcntbdi2 (tmp1, src));
34953 tmp2 = expand_mult (DImode, tmp1,
34954 GEN_INT ((HOST_WIDE_INT)
34955 0x01010101 << 32 | 0x01010101),
34956 NULL_RTX, 0);
34957 tmp2 = force_reg (DImode, tmp2);
34958 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
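/* Editorial sketch (not part of rs6000.c, name assumed): why the
   popcntb fallback works.  popcntb leaves each result byte holding
   the population count of the corresponding source byte; multiplying
   by 0x01010101 (0x0101010101010101 for DImode) accumulates every
   byte count into the most significant byte, which the final shift
   by 24 (or 56) extracts.  No byte can overflow: each count is at
   most 8 and the SImode sum is at most 32.  */

static unsigned int
popcount_sketch (unsigned int src)
{
  unsigned int counts = 0;
  for (int i = 0; i < 4; i++)
    {
      unsigned int byte = (src >> (8 * i)) & 0xff;
      unsigned int c = 0;
      while (byte)            /* popcntb does this for all bytes at once */
        {
          c += byte & 1;
          byte >>= 1;
        }
      counts |= c << (8 * i);
    }
  return (counts * 0x01010101u) >> 24;
}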
34963 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
34964 target, and SRC is the argument operand. */
34966 void
34967 rs6000_emit_parity (rtx dst, rtx src)
34969 machine_mode mode = GET_MODE (dst);
34970 rtx tmp;
34972 tmp = gen_reg_rtx (mode);
34974 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
34975 if (TARGET_CMPB)
34977 if (mode == SImode)
34979 emit_insn (gen_popcntbsi2 (tmp, src));
34980 emit_insn (gen_paritysi2_cmpb (dst, tmp));
34982 else
34984 emit_insn (gen_popcntbdi2 (tmp, src));
34985 emit_insn (gen_paritydi2_cmpb (dst, tmp));
34987 return;
34990 if (mode == SImode)
34992 /* Is mult+shift >= shift+xor+shift+xor? */
34993 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
34995 rtx tmp1, tmp2, tmp3, tmp4;
34997 tmp1 = gen_reg_rtx (SImode);
34998 emit_insn (gen_popcntbsi2 (tmp1, src));
35000 tmp2 = gen_reg_rtx (SImode);
35001 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35002 tmp3 = gen_reg_rtx (SImode);
35003 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35005 tmp4 = gen_reg_rtx (SImode);
35006 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35007 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35009 else
35010 rs6000_emit_popcount (tmp, src);
35011 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35013 else
35015 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35016 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35018 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35020 tmp1 = gen_reg_rtx (DImode);
35021 emit_insn (gen_popcntbdi2 (tmp1, src));
35023 tmp2 = gen_reg_rtx (DImode);
35024 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35025 tmp3 = gen_reg_rtx (DImode);
35026 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35028 tmp4 = gen_reg_rtx (DImode);
35029 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35030 tmp5 = gen_reg_rtx (DImode);
35031 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35033 tmp6 = gen_reg_rtx (DImode);
35034 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35035 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35037 else
35038 rs6000_emit_popcount (tmp, src);
35039 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
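/* Editorial sketch (not part of rs6000.c, name assumed): the
   shift/xor fallback above in plain C.  Starting from the popcntb
   per-byte counts, xor-folding the halves together collects the low
   bit of every byte count into the low byte, whose low bit is the
   parity of the whole word.  */

static unsigned int
parity_sketch (unsigned int byte_counts)  /* popcntb (src) */
{
  unsigned int t = byte_counts;
  t ^= t >> 16;   /* fold the upper half onto the lower         */
  t ^= t >> 8;    /* low byte is now the xor of all four counts */
  return t & 1;
}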
35043 /* Expand an Altivec constant permutation for little endian mode.
35044 There are two issues: First, the two input operands must be
35045 swapped so that together they form a double-wide array in LE
35046 order. Second, the vperm instruction has surprising behavior
35047 in LE mode: it interprets the elements of the source vectors
35048 in BE mode ("left to right") and interprets the elements of
35049 the destination vector in LE mode ("right to left"). To
35050 correct for this, we must subtract each element of the permute
35051 control vector from 31.
35053 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35054 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35055 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35056 serve as the permute control vector. Then, in BE mode,
35058 vperm 9,10,11,12
35060 places the desired result in vr9. However, in LE mode the
35061 vector contents will be
35063 vr10 = 00000003 00000002 00000001 00000000
35064 vr11 = 00000007 00000006 00000005 00000004
35066 The result of the vperm using the same permute control vector is
35068 vr9 = 05000000 07000000 01000000 03000000
35070 That is, the leftmost 4 bytes of vr10 are interpreted as the
35071 source for the rightmost 4 bytes of vr9, and so on.
35073 If we change the permute control vector to
35075 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35077 and issue
35079 vperm 9,11,10,12
35081 we get the desired
35083 vr9 = 00000006 00000004 00000002 00000000. */
35085 void
35086 altivec_expand_vec_perm_const_le (rtx operands[4])
35088 unsigned int i;
35089 rtx perm[16];
35090 rtx constv, unspec;
35091 rtx target = operands[0];
35092 rtx op0 = operands[1];
35093 rtx op1 = operands[2];
35094 rtx sel = operands[3];
35096 /* Unpack and adjust the constant selector. */
35097 for (i = 0; i < 16; ++i)
35099 rtx e = XVECEXP (sel, 0, i);
35100 unsigned int elt = 31 - (INTVAL (e) & 31);
35101 perm[i] = GEN_INT (elt);
35104 /* Expand to a permute, swapping the inputs and using the
35105 adjusted selector. */
35106 if (!REG_P (op0))
35107 op0 = force_reg (V16QImode, op0);
35108 if (!REG_P (op1))
35109 op1 = force_reg (V16QImode, op1);
35111 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35112 constv = force_reg (V16QImode, constv);
35113 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35114 UNSPEC_VPERM);
35115 if (!REG_P (target))
35117 rtx tmp = gen_reg_rtx (V16QImode);
35118 emit_move_insn (tmp, unspec);
35119 unspec = tmp;
35122 emit_move_insn (target, unspec);
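/* Editorial check of the adjustment rule above, as plain C (name
   assumed).  Applied to the example's BE control vector
   {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} it yields
   {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}, the vector quoted
   in the comment.  */

static void
adjust_vperm_sel_for_le (const unsigned char be_sel[16],
                         unsigned char le_sel[16])
{
  for (int i = 0; i < 16; i++)
    le_sel[i] = 31 - (be_sel[i] & 31);
}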
35125 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35126 permute control vector. But here it's not a constant, so we must
35127 generate a vector NAND or NOR to do the adjustment. */
35129 void
35130 altivec_expand_vec_perm_le (rtx operands[4])
35132 rtx notx, iorx, unspec;
35133 rtx target = operands[0];
35134 rtx op0 = operands[1];
35135 rtx op1 = operands[2];
35136 rtx sel = operands[3];
35137 rtx tmp = target;
35138 rtx norreg = gen_reg_rtx (V16QImode);
35139 machine_mode mode = GET_MODE (target);
35141 /* Get everything in regs so the pattern matches. */
35142 if (!REG_P (op0))
35143 op0 = force_reg (mode, op0);
35144 if (!REG_P (op1))
35145 op1 = force_reg (mode, op1);
35146 if (!REG_P (sel))
35147 sel = force_reg (V16QImode, sel);
35148 if (!REG_P (target))
35149 tmp = gen_reg_rtx (mode);
35151 if (TARGET_P9_VECTOR)
35153 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
35154 UNSPEC_VPERMR);
35156 else
35158 /* Invert the selector with a VNAND if available, else a VNOR.
35159 The VNAND is preferred for future fusion opportunities. */
35160 notx = gen_rtx_NOT (V16QImode, sel);
35161 iorx = (TARGET_P8_VECTOR
35162 ? gen_rtx_IOR (V16QImode, notx, notx)
35163 : gen_rtx_AND (V16QImode, notx, notx));
35164 emit_insn (gen_rtx_SET (norreg, iorx));
35166 /* Permute with operands reversed and adjusted selector. */
35167 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35168 UNSPEC_VPERM);
35171 /* Copy into target, possibly by way of a register. */
35172 if (!REG_P (target))
35174 emit_move_insn (tmp, unspec);
35175 unspec = tmp;
35178 emit_move_insn (target, unspec);
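/* Editorial note on the inversion above: the vnand pattern's RTL form
   is (ior (not a) (not b)) and vnor's is (and (not a) (not b)), both
   by De Morgan; with a == b == sel either one computes ~sel.  And
   since vperm reads only the low 5 bits of each selector byte, ~elt
   is equivalent to 31 - elt there -- exactly the constant-selector
   adjustment used by altivec_expand_vec_perm_const_le.  */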
35181 /* Expand an Altivec constant permutation. Return true if we match
35182 an efficient implementation; false to fall back to VPERM. */
35184 bool
35185 altivec_expand_vec_perm_const (rtx operands[4])
35187 struct altivec_perm_insn {
35188 HOST_WIDE_INT mask;
35189 enum insn_code impl;
35190 unsigned char perm[16];
35192 static const struct altivec_perm_insn patterns[] = {
35193 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35194 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35195 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35196 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35197 { OPTION_MASK_ALTIVEC,
35198 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35199 : CODE_FOR_altivec_vmrglb_direct),
35200 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35201 { OPTION_MASK_ALTIVEC,
35202 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35203 : CODE_FOR_altivec_vmrglh_direct),
35204 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35205 { OPTION_MASK_ALTIVEC,
35206 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35207 : CODE_FOR_altivec_vmrglw_direct),
35208 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35209 { OPTION_MASK_ALTIVEC,
35210 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35211 : CODE_FOR_altivec_vmrghb_direct),
35212 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35213 { OPTION_MASK_ALTIVEC,
35214 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35215 : CODE_FOR_altivec_vmrghh_direct),
35216 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35217 { OPTION_MASK_ALTIVEC,
35218 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35219 : CODE_FOR_altivec_vmrghw_direct),
35220 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35221 { OPTION_MASK_P8_VECTOR,
35222 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35223 : CODE_FOR_p8_vmrgow_v4sf_direct),
35224 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35225 { OPTION_MASK_P8_VECTOR,
35226 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35227 : CODE_FOR_p8_vmrgew_v4sf_direct),
35228 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35231 unsigned int i, j, elt, which;
35232 unsigned char perm[16];
35233 rtx target, op0, op1, sel, x;
35234 bool one_vec;
35236 target = operands[0];
35237 op0 = operands[1];
35238 op1 = operands[2];
35239 sel = operands[3];
35241 /* Unpack the constant selector. */
35242 for (i = which = 0; i < 16; ++i)
35244 rtx e = XVECEXP (sel, 0, i);
35245 elt = INTVAL (e) & 31;
35246 which |= (elt < 16 ? 1 : 2);
35247 perm[i] = elt;
35250 /* Simplify the constant selector based on operands. */
35251 switch (which)
35253 default:
35254 gcc_unreachable ();
35256 case 3:
35257 one_vec = false;
35258 if (!rtx_equal_p (op0, op1))
35259 break;
35260 /* FALLTHRU */
35262 case 2:
35263 for (i = 0; i < 16; ++i)
35264 perm[i] &= 15;
35265 op0 = op1;
35266 one_vec = true;
35267 break;
35269 case 1:
35270 op1 = op0;
35271 one_vec = true;
35272 break;
35275 /* Look for splat patterns. */
35276 if (one_vec)
35278 elt = perm[0];
35280 for (i = 0; i < 16; ++i)
35281 if (perm[i] != elt)
35282 break;
35283 if (i == 16)
35285 if (!BYTES_BIG_ENDIAN)
35286 elt = 15 - elt;
35287 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35288 return true;
35291 if (elt % 2 == 0)
35293 for (i = 0; i < 16; i += 2)
35294 if (perm[i] != elt || perm[i + 1] != elt + 1)
35295 break;
35296 if (i == 16)
35298 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35299 x = gen_reg_rtx (V8HImode);
35300 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35301 GEN_INT (field)));
35302 emit_move_insn (target, gen_lowpart (V16QImode, x));
35303 return true;
35307 if (elt % 4 == 0)
35309 for (i = 0; i < 16; i += 4)
35310 if (perm[i] != elt
35311 || perm[i + 1] != elt + 1
35312 || perm[i + 2] != elt + 2
35313 || perm[i + 3] != elt + 3)
35314 break;
35315 if (i == 16)
35317 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35318 x = gen_reg_rtx (V4SImode);
35319 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35320 GEN_INT (field)));
35321 emit_move_insn (target, gen_lowpart (V16QImode, x));
35322 return true;
35327 /* Look for merge and pack patterns. */
35328 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35330 bool swapped;
35332 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35333 continue;
35335 elt = patterns[j].perm[0];
35336 if (perm[0] == elt)
35337 swapped = false;
35338 else if (perm[0] == elt + 16)
35339 swapped = true;
35340 else
35341 continue;
35342 for (i = 1; i < 16; ++i)
35344 elt = patterns[j].perm[i];
35345 if (swapped)
35346 elt = (elt >= 16 ? elt - 16 : elt + 16);
35347 else if (one_vec && elt >= 16)
35348 elt -= 16;
35349 if (perm[i] != elt)
35350 break;
35352 if (i == 16)
35354 enum insn_code icode = patterns[j].impl;
35355 machine_mode omode = insn_data[icode].operand[0].mode;
35356 machine_mode imode = insn_data[icode].operand[1].mode;
35358 /* For little-endian, don't use vpkuwum and vpkuhum if the
35359 underlying vector type is not V4SI or V8HI, respectively.
35360 For example, using vpkuwum with a V8HI picks up the even
35361 halfwords (BE numbering) when the even halfwords (LE
35362 numbering) are what we need. */
35363 if (!BYTES_BIG_ENDIAN
35364 && icode == CODE_FOR_altivec_vpkuwum_direct
35365 && ((GET_CODE (op0) == REG
35366 && GET_MODE (op0) != V4SImode)
35367 || (GET_CODE (op0) == SUBREG
35368 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35369 continue;
35370 if (!BYTES_BIG_ENDIAN
35371 && icode == CODE_FOR_altivec_vpkuhum_direct
35372 && ((GET_CODE (op0) == REG
35373 && GET_MODE (op0) != V8HImode)
35374 || (GET_CODE (op0) == SUBREG
35375 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35376 continue;
35378 /* For little-endian, the two input operands must be swapped
35379 (or swapped back) to ensure proper right-to-left numbering
35380 from 0 to 2N-1. */
35381 if (swapped ^ !BYTES_BIG_ENDIAN)
35382 std::swap (op0, op1);
35383 if (imode != V16QImode)
35385 op0 = gen_lowpart (imode, op0);
35386 op1 = gen_lowpart (imode, op1);
35388 if (omode == V16QImode)
35389 x = target;
35390 else
35391 x = gen_reg_rtx (omode);
35392 emit_insn (GEN_FCN (icode) (x, op0, op1));
35393 if (omode != V16QImode)
35394 emit_move_insn (target, gen_lowpart (V16QImode, x));
35395 return true;
35399 if (!BYTES_BIG_ENDIAN)
35401 altivec_expand_vec_perm_const_le (operands);
35402 return true;
35405 return false;
35408 /* Expand a Paired Single or VSX Permute Doubleword constant permutation.
35409 Return true if we match an efficient implementation. */
35411 static bool
35412 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35413 unsigned char perm0, unsigned char perm1)
35415 rtx x;
35417 /* If both selectors come from the same operand, fold to single op. */
35418 if ((perm0 & 2) == (perm1 & 2))
35420 if (perm0 & 2)
35421 op0 = op1;
35422 else
35423 op1 = op0;
35425 /* If both operands are equal, fold to simpler permutation. */
35426 if (rtx_equal_p (op0, op1))
35428 perm0 = perm0 & 1;
35429 perm1 = (perm1 & 1) + 2;
35431 /* If the first selector comes from the second operand, swap. */
35432 else if (perm0 & 2)
35434 if (perm1 & 2)
35435 return false;
35436 perm0 -= 2;
35437 perm1 += 2;
35438 std::swap (op0, op1);
35440 /* If the second selector does not come from the second operand, fail. */
35441 else if ((perm1 & 2) == 0)
35442 return false;
35444 /* Success! */
35445 if (target != NULL)
35447 machine_mode vmode, dmode;
35448 rtvec v;
35450 vmode = GET_MODE (target);
35451 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35452 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
35453 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35454 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35455 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35456 emit_insn (gen_rtx_SET (target, x));
35458 return true;
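/* Editorial note: after the normalization above, perm0 indexes the
   (possibly swapped) first operand and perm1 the second, so the
   emitted RTL is (vec_select (vec_concat op0 op1) [perm0 perm1])
   over the four-element concatenation {op0[0], op0[1], op1[0],
   op1[1]}.  For example, perm0 = 1 and perm1 = 2 extract
   {op0[1], op1[0]}.  */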
35461 bool
35462 rs6000_expand_vec_perm_const (rtx operands[4])
35464 rtx target, op0, op1, sel;
35465 unsigned char perm0, perm1;
35467 target = operands[0];
35468 op0 = operands[1];
35469 op1 = operands[2];
35470 sel = operands[3];
35472 /* Unpack the constant selector. */
35473 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
35474 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
35476 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
35479 /* Test whether a constant permutation is supported. */
35481 static bool
35482 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode,
35483 const unsigned char *sel)
35485 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35486 if (TARGET_ALTIVEC)
35487 return true;
35489 /* Check for ps_merge* or evmerge* insns. */
35490 if (TARGET_PAIRED_FLOAT && vmode == V2SFmode)
35492 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35493 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35494 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
35497 return false;
35500 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
35502 static void
35503 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35504 machine_mode vmode, unsigned nelt, rtx perm[])
35506 machine_mode imode;
35507 rtx x;
35509 imode = vmode;
35510 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
35511 imode = mode_for_vector
35512 (int_mode_for_mode (GET_MODE_INNER (vmode)).require (), nelt);
35514 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
35515 x = expand_vec_perm (vmode, op0, op1, x, target);
35516 if (x != target)
35517 emit_move_insn (target, x);
35520 /* Expand an extract even operation. */
35522 void
35523 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35525 machine_mode vmode = GET_MODE (target);
35526 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35527 rtx perm[16];
35529 for (i = 0; i < nelt; i++)
35530 perm[i] = GEN_INT (i * 2);
35532 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
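/* Editorial example: for a V4SI target the selector built above is
   {0, 2, 4, 6}, i.e. the even elements of the eight-element
   concatenation of op0 and op1.  */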
35535 /* Expand a vector interleave operation. */
35537 void
35538 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35540 machine_mode vmode = GET_MODE (target);
35541 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35542 rtx perm[16];
35544 high = (highp ? 0 : nelt / 2);
35545 for (i = 0; i < nelt / 2; i++)
35547 perm[i * 2] = GEN_INT (i + high);
35548 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
35551 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
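/* Editorial example: for a V4SI target, highp yields the selector
   {0, 4, 1, 5} (interleaving the first halves of op0 and op1), and
   !highp yields {2, 6, 3, 7} (interleaving the second halves).  */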
35554 /* Scale a V2DF vector SRC by two to the SCALE and place in TGT. */
35555 void
35556 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35558 HOST_WIDE_INT hwi_scale (scale);
35559 REAL_VALUE_TYPE r_pow;
35560 rtvec v = rtvec_alloc (2);
35561 rtx elt;
35562 rtx scale_vec = gen_reg_rtx (V2DFmode);
35563 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35564 elt = const_double_from_real_value (r_pow, DFmode);
35565 RTVEC_ELT (v, 0) = elt;
35566 RTVEC_ELT (v, 1) = elt;
35567 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35568 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
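/* Editorial note: the effect is tgt[i] = src[i] * 2^scale for both
   lanes, e.g. scale = 4 splats 16.0 and multiplies.  A negative
   scale divides by a power of two, exactly so barring underflow.  */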
35571 /* Return an RTX representing where to find the function value of a
35572 function returning MODE. */
35573 static rtx
35574 rs6000_complex_function_value (machine_mode mode)
35576 unsigned int regno;
35577 rtx r1, r2;
35578 machine_mode inner = GET_MODE_INNER (mode);
35579 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35581 if (TARGET_FLOAT128_TYPE
35582 && (mode == KCmode
35583 || (mode == TCmode && TARGET_IEEEQUAD)))
35584 regno = ALTIVEC_ARG_RETURN;
35586 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35587 regno = FP_ARG_RETURN;
35589 else
35591 regno = GP_ARG_RETURN;
35593 /* 32-bit is OK since it'll go in r3/r4. */
35594 if (TARGET_32BIT && inner_bytes >= 4)
35595 return gen_rtx_REG (mode, regno);
35598 if (inner_bytes >= 8)
35599 return gen_rtx_REG (mode, regno);
35601 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35602 const0_rtx);
35603 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35604 GEN_INT (inner_bytes));
35605 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35608 /* Return an rtx describing a return value of MODE as a PARALLEL
35609 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35610 stride REG_STRIDE. */
35612 static rtx
35613 rs6000_parallel_return (machine_mode mode,
35614 int n_elts, machine_mode elt_mode,
35615 unsigned int regno, unsigned int reg_stride)
35617 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35619 int i;
35620 for (i = 0; i < n_elts; i++)
35622 rtx r = gen_rtx_REG (elt_mode, regno);
35623 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35624 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35625 regno += reg_stride;
35628 return par;
35631 /* Target hook for TARGET_FUNCTION_VALUE.
35633 An integer value is in r3 and a floating-point value is in fp1,
35634 unless -msoft-float. */
35636 static rtx
35637 rs6000_function_value (const_tree valtype,
35638 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35639 bool outgoing ATTRIBUTE_UNUSED)
35641 machine_mode mode;
35642 unsigned int regno;
35643 machine_mode elt_mode;
35644 int n_elts;
35646 /* Special handling for structs in darwin64. */
35647 if (TARGET_MACHO
35648 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35650 CUMULATIVE_ARGS valcum;
35651 rtx valret;
35653 valcum.words = 0;
35654 valcum.fregno = FP_ARG_MIN_REG;
35655 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35656 /* Do a trial code generation as if this were going to be passed as
35657 an argument; if any part goes in memory, we return NULL. */
35658 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35659 if (valret)
35660 return valret;
35661 /* Otherwise fall through to standard ABI rules. */
35664 mode = TYPE_MODE (valtype);
35666 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35667 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35669 int first_reg, n_regs;
35671 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35673 /* _Decimal128 must use even/odd register pairs. */
35674 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35675 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35677 else
35679 first_reg = ALTIVEC_ARG_RETURN;
35680 n_regs = 1;
35683 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35686 /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI. */
35687 if (TARGET_32BIT && TARGET_POWERPC64)
35688 switch (mode)
35690 default:
35691 break;
35692 case E_DImode:
35693 case E_SCmode:
35694 case E_DCmode:
35695 case E_TCmode:
35696 int count = GET_MODE_SIZE (mode) / 4;
35697 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35700 if ((INTEGRAL_TYPE_P (valtype)
35701 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35702 || POINTER_TYPE_P (valtype))
35703 mode = TARGET_32BIT ? SImode : DImode;
35705 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35706 /* _Decimal128 must use an even/odd register pair. */
35707 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35708 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35709 && !FLOAT128_VECTOR_P (mode)
35710 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
35711 regno = FP_ARG_RETURN;
35712 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35713 && targetm.calls.split_complex_arg)
35714 return rs6000_complex_function_value (mode);
35715 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35716 return register is used in both cases, and we won't see V2DImode/V2DFmode
35717 for pure altivec, combine the two cases. */
35718 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35719 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35720 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35721 regno = ALTIVEC_ARG_RETURN;
35722 else
35723 regno = GP_ARG_RETURN;
35725 return gen_rtx_REG (mode, regno);
35728 /* Define how to find the value returned by a library function
35729 assuming the value has mode MODE. */
35730 rtx
35731 rs6000_libcall_value (machine_mode mode)
35733 unsigned int regno;
35735 /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI. */
35736 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35737 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35739 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35740 /* _Decimal128 must use an even/odd register pair. */
35741 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35742 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
35743 && TARGET_HARD_FLOAT
35744 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
35745 regno = FP_ARG_RETURN;
35746 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35747 return register is used in both cases, and we won't see V2DImode/V2DFmode
35748 for pure altivec, combine the two cases. */
35749 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35750 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35751 regno = ALTIVEC_ARG_RETURN;
35752 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35753 return rs6000_complex_function_value (mode);
35754 else
35755 regno = GP_ARG_RETURN;
35757 return gen_rtx_REG (mode, regno);
35760 /* Compute register pressure classes. We implement the target hook to avoid
35761 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
35762 lead to incorrect estimates of the number of available registers and
35763 therefore increased register pressure/spill. */
35764 static int
35765 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35767 int n;
35769 n = 0;
35770 pressure_classes[n++] = GENERAL_REGS;
35771 if (TARGET_VSX)
35772 pressure_classes[n++] = VSX_REGS;
35773 else
35775 if (TARGET_ALTIVEC)
35776 pressure_classes[n++] = ALTIVEC_REGS;
35777 if (TARGET_HARD_FLOAT)
35778 pressure_classes[n++] = FLOAT_REGS;
35780 pressure_classes[n++] = CR_REGS;
35781 pressure_classes[n++] = SPECIAL_REGS;
35783 return n;
35786 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35787 Frame pointer elimination is automatically handled.
35789 For the RS/6000, if frame pointer elimination is being done, we would like
35790 to convert ap into fp, not sp.
35792 We need r30 if -mminimal-toc was specified, and there are constant pool
35793 references. */
35795 static bool
35796 rs6000_can_eliminate (const int from, const int to)
35798 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35799 ? ! frame_pointer_needed
35800 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35801 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
35802 || constant_pool_empty_p ()
35803 : true);
35806 /* Define the offset between two registers, FROM to be eliminated and its
35807 replacement TO, at the start of a routine. */
35808 HOST_WIDE_INT
35809 rs6000_initial_elimination_offset (int from, int to)
35811 rs6000_stack_t *info = rs6000_stack_info ();
35812 HOST_WIDE_INT offset;
35814 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35815 offset = info->push_p ? 0 : -info->total_size;
35816 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35818 offset = info->push_p ? 0 : -info->total_size;
35819 if (FRAME_GROWS_DOWNWARD)
35820 offset += info->fixed_size + info->vars_size + info->parm_size;
35822 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35823 offset = FRAME_GROWS_DOWNWARD
35824 ? info->fixed_size + info->vars_size + info->parm_size
35825 : 0;
35826 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35827 offset = info->total_size;
35828 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35829 offset = info->push_p ? info->total_size : 0;
35830 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35831 offset = 0;
35832 else
35833 gcc_unreachable ();
35835 return offset;
35838 /* Fill in sizes of registers used by unwinder. */
35840 static void
35841 rs6000_init_dwarf_reg_sizes_extra (tree address)
35843 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35845 int i;
35846 machine_mode mode = TYPE_MODE (char_type_node);
35847 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35848 rtx mem = gen_rtx_MEM (BLKmode, addr);
35849 rtx value = gen_int_mode (16, mode);
35851 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35852 The unwinder still needs to know the size of Altivec registers. */
35854 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35856 int column = DWARF_REG_TO_UNWIND_COLUMN
35857 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35858 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35860 emit_move_insn (adjust_address (mem, mode, offset), value);
35865 /* Map internal gcc register numbers to debug format register numbers.
35866 FORMAT specifies the type of debug register number to use:
35867 0 -- debug information, except for frame-related sections
35868 1 -- DWARF .debug_frame section
35869 2 -- DWARF .eh_frame section */
35871 unsigned int
35872 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
35874 /* Except for the above, we use the internal number for non-DWARF
35875 debug information, and also for .eh_frame. */
35876 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
35877 return regno;
35879 /* On some platforms, we use the standard DWARF register
35880 numbering for .debug_info and .debug_frame. */
35881 #ifdef RS6000_USE_DWARF_NUMBERING
35882 if (regno <= 63)
35883 return regno;
35884 if (regno == LR_REGNO)
35885 return 108;
35886 if (regno == CTR_REGNO)
35887 return 109;
35888 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
35889 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
35890 The actual code emitted saves the whole of CR, so we map CR2_REGNO
35891 to the DWARF reg for CR. */
35892 if (format == 1 && regno == CR2_REGNO)
35893 return 64;
35894 if (CR_REGNO_P (regno))
35895 return regno - CR0_REGNO + 86;
35896 if (regno == CA_REGNO)
35897 return 101; /* XER */
35898 if (ALTIVEC_REGNO_P (regno))
35899 return regno - FIRST_ALTIVEC_REGNO + 1124;
35900 if (regno == VRSAVE_REGNO)
35901 return 356;
35902 if (regno == VSCR_REGNO)
35903 return 67;
35904 #endif
35905 return regno;
35908 /* target hook eh_return_filter_mode */
35909 static scalar_int_mode
35910 rs6000_eh_return_filter_mode (void)
35912 return TARGET_32BIT ? SImode : word_mode;
35915 /* Target hook for scalar_mode_supported_p. */
35916 static bool
35917 rs6000_scalar_mode_supported_p (scalar_mode mode)
35919 /* -m32 does not support TImode. This is the default, from
35920 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
35921 same ABI as for -m32. But default_scalar_mode_supported_p allows
35922 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
35923 for -mpowerpc64. */
35924 if (TARGET_32BIT && mode == TImode)
35925 return false;
35927 if (DECIMAL_FLOAT_MODE_P (mode))
35928 return default_decimal_float_supported_p ();
35929 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
35930 return true;
35931 else
35932 return default_scalar_mode_supported_p (mode);
35935 /* Target hook for vector_mode_supported_p. */
35936 static bool
35937 rs6000_vector_mode_supported_p (machine_mode mode)
35940 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
35941 return true;
35943 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
35944 128-bit, the compiler might try to widen IEEE 128-bit to IBM
35945 double-double. */
35946 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
35947 return true;
35949 else
35950 return false;
35953 /* Target hook for floatn_mode. */
35954 static opt_scalar_float_mode
35955 rs6000_floatn_mode (int n, bool extended)
35957 if (extended)
35959 switch (n)
35961 case 32:
35962 return DFmode;
35964 case 64:
35965 if (TARGET_FLOAT128_KEYWORD)
35966 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35967 else
35968 return opt_scalar_float_mode ();
35970 case 128:
35971 return opt_scalar_float_mode ();
35973 default:
35974 /* Those are the only valid _FloatNx types. */
35975 gcc_unreachable ();
35978 else
35980 switch (n)
35982 case 32:
35983 return SFmode;
35985 case 64:
35986 return DFmode;
35988 case 128:
35989 if (TARGET_FLOAT128_KEYWORD)
35990 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35991 else
35992 return opt_scalar_float_mode ();
35994 default:
35995 return opt_scalar_float_mode ();
36001 /* Target hook for c_mode_for_suffix. */
36002 static machine_mode
36003 rs6000_c_mode_for_suffix (char suffix)
36005 if (TARGET_FLOAT128_TYPE)
36007 if (suffix == 'q' || suffix == 'Q')
36008 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36010 /* At the moment, we are not defining a suffix for IBM extended double.
36011 If/when the default for -mabi=ieeelongdouble is changed, and we want
36012 to support __ibm128 constants in legacy library code, we may need to
36013 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36014 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36015 __float80 constants. */
36018 return VOIDmode;
36021 /* Target hook for invalid_arg_for_unprototyped_fn. */
36022 static const char *
36023 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36025 return (!rs6000_darwin64_abi
36026 && typelist == 0
36027 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36028 && (funcdecl == NULL_TREE
36029 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36030 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36031 ? N_("AltiVec argument passed to unprototyped function")
36032 : NULL;
36035 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36036 setup by using __stack_chk_fail_local hidden function instead of
36037 calling __stack_chk_fail directly. Otherwise it is better to call
36038 __stack_chk_fail directly. */
36040 static tree ATTRIBUTE_UNUSED
36041 rs6000_stack_protect_fail (void)
36043 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36044 ? default_hidden_stack_protect_fail ()
36045 : default_external_stack_protect_fail ();
36048 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36050 #if TARGET_ELF
36051 static unsigned HOST_WIDE_INT
36052 rs6000_asan_shadow_offset (void)
36054 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36056 #endif
36058 /* Mask options that we want to support inside of attribute((target)) and
36059 #pragma GCC target operations. Note, we do not include things like
36060 64/32-bit, endianness, hard/soft floating point, etc. that would have
36061 different calling sequences. */
36063 struct rs6000_opt_mask {
36064 const char *name; /* option name */
36065 HOST_WIDE_INT mask; /* mask to set */
36066 bool invert; /* invert sense of mask */
36067 bool valid_target; /* option is a target option */
36070 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36072 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36073 { "cmpb", OPTION_MASK_CMPB, false, true },
36074 { "crypto", OPTION_MASK_CRYPTO, false, true },
36075 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36076 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36077 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36078 false, true },
36079 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, false },
36080 { "float128-type", OPTION_MASK_FLOAT128_TYPE, false, false },
36081 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, false },
36082 { "fprnd", OPTION_MASK_FPRND, false, true },
36083 { "hard-dfp", OPTION_MASK_DFP, false, true },
36084 { "htm", OPTION_MASK_HTM, false, true },
36085 { "isel", OPTION_MASK_ISEL, false, true },
36086 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36087 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36088 { "modulo", OPTION_MASK_MODULO, false, true },
36089 { "mulhw", OPTION_MASK_MULHW, false, true },
36090 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36091 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36092 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36093 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36094 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36095 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36096 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
36097 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36098 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36099 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36100 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36101 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36102 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36103 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36104 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36105 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36106 { "string", OPTION_MASK_STRING, false, true },
36107 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
36108 { "update", OPTION_MASK_NO_UPDATE, true , true },
36109 { "vsx", OPTION_MASK_VSX, false, true },
36110 #ifdef OPTION_MASK_64BIT
36111 #if TARGET_AIX_OS
36112 { "aix64", OPTION_MASK_64BIT, false, false },
36113 { "aix32", OPTION_MASK_64BIT, true, false },
36114 #else
36115 { "64", OPTION_MASK_64BIT, false, false },
36116 { "32", OPTION_MASK_64BIT, true, false },
36117 #endif
36118 #endif
36119 #ifdef OPTION_MASK_EABI
36120 { "eabi", OPTION_MASK_EABI, false, false },
36121 #endif
36122 #ifdef OPTION_MASK_LITTLE_ENDIAN
36123 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36124 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36125 #endif
36126 #ifdef OPTION_MASK_RELOCATABLE
36127 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36128 #endif
36129 #ifdef OPTION_MASK_STRICT_ALIGN
36130 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36131 #endif
36132 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36133 { "string", OPTION_MASK_STRING, false, false },
36136 /* Builtin mask mapping for printing the flags. */
36137 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36139 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36140 { "vsx", RS6000_BTM_VSX, false, false },
36141 { "paired", RS6000_BTM_PAIRED, false, false },
36142 { "fre", RS6000_BTM_FRE, false, false },
36143 { "fres", RS6000_BTM_FRES, false, false },
36144 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36145 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36146 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36147 { "cell", RS6000_BTM_CELL, false, false },
36148 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36149 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36150 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36151 { "crypto", RS6000_BTM_CRYPTO, false, false },
36152 { "htm", RS6000_BTM_HTM, false, false },
36153 { "hard-dfp", RS6000_BTM_DFP, false, false },
36154 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36155 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36156 { "float128", RS6000_BTM_FLOAT128, false, false },
36159 /* Option variables that we want to support inside attribute((target)) and
36160 #pragma GCC target operations. */
36162 struct rs6000_opt_var {
36163 const char *name; /* option name */
36164 size_t global_offset; /* offset of the option in global_options. */
36165 size_t target_offset; /* offset of the option in target options. */
36168 static struct rs6000_opt_var const rs6000_opt_vars[] =
36170 { "friz",
36171 offsetof (struct gcc_options, x_TARGET_FRIZ),
36172 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36173 { "avoid-indexed-addresses",
36174 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36175 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36176 { "paired",
36177 offsetof (struct gcc_options, x_rs6000_paired_float),
36178 offsetof (struct cl_target_option, x_rs6000_paired_float), },
36179 { "longcall",
36180 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36181 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36182 { "optimize-swaps",
36183 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36184 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36185 { "allow-movmisalign",
36186 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36187 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36188 { "sched-groups",
36189 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36190 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36191 { "always-hint",
36192 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36193 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36194 { "align-branch-targets",
36195 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36196 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36197 { "tls-markers",
36198 offsetof (struct gcc_options, x_tls_markers),
36199 offsetof (struct cl_target_option, x_tls_markers), },
36200 { "sched-prolog",
36201 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36202 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36203 { "sched-epilog",
36204 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36205 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36208 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36209 parsing. Return true if there were no errors. */
36211 static bool
36212 rs6000_inner_target_options (tree args, bool attr_p)
36214 bool ret = true;
36216 if (args == NULL_TREE)
36219 else if (TREE_CODE (args) == STRING_CST)
36221 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36222 char *q;
36224 while ((q = strtok (p, ",")) != NULL)
36226 bool error_p = false;
36227 bool not_valid_p = false;
36228 const char *cpu_opt = NULL;
36230 p = NULL;
36231 if (strncmp (q, "cpu=", 4) == 0)
36233 int cpu_index = rs6000_cpu_name_lookup (q+4);
36234 if (cpu_index >= 0)
36235 rs6000_cpu_index = cpu_index;
36236 else
36238 error_p = true;
36239 cpu_opt = q+4;
36242 else if (strncmp (q, "tune=", 5) == 0)
36244 int tune_index = rs6000_cpu_name_lookup (q+5);
36245 if (tune_index >= 0)
36246 rs6000_tune_index = tune_index;
36247 else
36249 error_p = true;
36250 cpu_opt = q+5;
36253 else
36255 size_t i;
36256 bool invert = false;
36257 char *r = q;
36259 error_p = true;
36260 if (strncmp (r, "no-", 3) == 0)
36262 invert = true;
36263 r += 3;
36266 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36267 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36269 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36271 if (!rs6000_opt_masks[i].valid_target)
36272 not_valid_p = true;
36273 else
36275 error_p = false;
36276 rs6000_isa_flags_explicit |= mask;
36278 /* VSX needs altivec, so -mvsx automagically sets
36279 altivec and disables -mavoid-indexed-addresses. */
36280 if (!invert)
36282 if (mask == OPTION_MASK_VSX)
36284 mask |= OPTION_MASK_ALTIVEC;
36285 TARGET_AVOID_XFORM = 0;
36289 if (rs6000_opt_masks[i].invert)
36290 invert = !invert;
36292 if (invert)
36293 rs6000_isa_flags &= ~mask;
36294 else
36295 rs6000_isa_flags |= mask;
36297 break;
36300 if (error_p && !not_valid_p)
36302 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36303 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36305 size_t j = rs6000_opt_vars[i].global_offset;
36306 *((int *) ((char *)&global_options + j)) = !invert;
36307 error_p = false;
36308 not_valid_p = false;
36309 break;
36314 if (error_p)
36316 const char *eprefix, *esuffix;
36318 ret = false;
36319 if (attr_p)
36321 eprefix = "__attribute__((__target__(";
36322 esuffix = ")))";
36324 else
36326 eprefix = "#pragma GCC target ";
36327 esuffix = "";
36330 if (cpu_opt)
36331 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36332 q, esuffix);
36333 else if (not_valid_p)
36334 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36335 else
36336 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36341 else if (TREE_CODE (args) == TREE_LIST)
36345 tree value = TREE_VALUE (args);
36346 if (value)
36348 bool ret2 = rs6000_inner_target_options (value, attr_p);
36349 if (!ret2)
36350 ret = false;
36352 args = TREE_CHAIN (args);
36354 while (args != NULL_TREE);
36357 else
36359 error ("attribute %<target%> argument not a string");
36360 return false;
36363 return ret;
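/* Editorial example (assumed user code, not part of this file) of
   strings the parser above accepts.  Each comma-separated token is
   "cpu=" or "tune=" plus a known processor name, an rs6000_opt_masks
   entry optionally prefixed with "no-", or an rs6000_opt_vars entry:

     __attribute__((__target__("cpu=power8,no-vsx")))
     void f (void);

     #pragma GCC target ("popcntd,tune=power9")  */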
36366 /* Print out the target options as a list for -mdebug=target. */
36368 static void
36369 rs6000_debug_target_options (tree args, const char *prefix)
36371 if (args == NULL_TREE)
36372 fprintf (stderr, "%s<NULL>", prefix);
36374 else if (TREE_CODE (args) == STRING_CST)
36376 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36377 char *q;
36379 while ((q = strtok (p, ",")) != NULL)
36381 p = NULL;
36382 fprintf (stderr, "%s\"%s\"", prefix, q);
36383 prefix = ", ";
36387 else if (TREE_CODE (args) == TREE_LIST)
36391 tree value = TREE_VALUE (args);
36392 if (value)
36394 rs6000_debug_target_options (value, prefix);
36395 prefix = ", ";
36397 args = TREE_CHAIN (args);
36399 while (args != NULL_TREE);
36402 else
36403 gcc_unreachable ();
36405 return;
36409 /* Hook to validate attribute((target("..."))). */
36411 static bool
36412 rs6000_valid_attribute_p (tree fndecl,
36413 tree ARG_UNUSED (name),
36414 tree args,
36415 int flags)
36417 struct cl_target_option cur_target;
36418 bool ret;
36419 tree old_optimize = build_optimization_node (&global_options);
36420 tree new_target, new_optimize;
36421 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36423 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36425 if (TARGET_DEBUG_TARGET)
36427 tree tname = DECL_NAME (fndecl);
36428 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36429 if (tname)
36430 fprintf (stderr, "function: %.*s\n",
36431 (int) IDENTIFIER_LENGTH (tname),
36432 IDENTIFIER_POINTER (tname));
36433 else
36434 fprintf (stderr, "function: unknown\n");
36436 fprintf (stderr, "args:");
36437 rs6000_debug_target_options (args, " ");
36438 fprintf (stderr, "\n");
36440 if (flags)
36441 fprintf (stderr, "flags: 0x%x\n", flags);
36443 fprintf (stderr, "--------------------\n");
36446 /* attribute((target("default"))) does nothing, beyond
36447 affecting multi-versioning. */
36448 if (TREE_VALUE (args)
36449 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36450 && TREE_CHAIN (args) == NULL_TREE
36451 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36452 return true;
36454 old_optimize = build_optimization_node (&global_options);
36455 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36457 /* If the function changed the optimization levels as well as setting target
36458 options, start with the optimizations specified. */
36459 if (func_optimize && func_optimize != old_optimize)
36460 cl_optimization_restore (&global_options,
36461 TREE_OPTIMIZATION (func_optimize));
36463 /* The target attributes may also change some optimization flags, so update
36464 the optimization options if necessary. */
36465 cl_target_option_save (&cur_target, &global_options);
36466 rs6000_cpu_index = rs6000_tune_index = -1;
36467 ret = rs6000_inner_target_options (args, true);
36469 /* Set up any additional state. */
36470 if (ret)
36472 ret = rs6000_option_override_internal (false);
36473 new_target = build_target_option_node (&global_options);
36475 else
36476 new_target = NULL;
36478 new_optimize = build_optimization_node (&global_options);
36480 if (!new_target)
36481 ret = false;
36483 else if (fndecl)
36485 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36487 if (old_optimize != new_optimize)
36488 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36491 cl_target_option_restore (&global_options, &cur_target);
36493 if (old_optimize != new_optimize)
36494 cl_optimization_restore (&global_options,
36495 TREE_OPTIMIZATION (old_optimize));
36497 return ret;
36501 /* Hook to validate the current #pragma GCC target and set the state, and
36502 update the macros based on what was changed. If ARGS is NULL, then
36503 POP_TARGET is used to reset the options. */
36505 bool
36506 rs6000_pragma_target_parse (tree args, tree pop_target)
36508 tree prev_tree = build_target_option_node (&global_options);
36509 tree cur_tree;
36510 struct cl_target_option *prev_opt, *cur_opt;
36511 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36512 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36514 if (TARGET_DEBUG_TARGET)
36516 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36517 fprintf (stderr, "args:");
36518 rs6000_debug_target_options (args, " ");
36519 fprintf (stderr, "\n");
36521 if (pop_target)
36523 fprintf (stderr, "pop_target:\n");
36524 debug_tree (pop_target);
36526 else
36527 fprintf (stderr, "pop_target: <NULL>\n");
36529 fprintf (stderr, "--------------------\n");
36532 if (! args)
36534 cur_tree = ((pop_target)
36535 ? pop_target
36536 : target_option_default_node);
36537 cl_target_option_restore (&global_options,
36538 TREE_TARGET_OPTION (cur_tree));
36540 else
36542 rs6000_cpu_index = rs6000_tune_index = -1;
36543 if (!rs6000_inner_target_options (args, false)
36544 || !rs6000_option_override_internal (false)
36545 || (cur_tree = build_target_option_node (&global_options))
36546 == NULL_TREE)
36548 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36549 fprintf (stderr, "invalid pragma\n");
36551 return false;
36555 target_option_current_node = cur_tree;
36557 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36558 change the macros that are defined. */
36559 if (rs6000_target_modify_macros_ptr)
36561 prev_opt = TREE_TARGET_OPTION (prev_tree);
36562 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36563 prev_flags = prev_opt->x_rs6000_isa_flags;
36565 cur_opt = TREE_TARGET_OPTION (cur_tree);
36566 cur_flags = cur_opt->x_rs6000_isa_flags;
36567 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36569 diff_bumask = (prev_bumask ^ cur_bumask);
36570 diff_flags = (prev_flags ^ cur_flags);
36572 if ((diff_flags != 0) || (diff_bumask != 0))
36574 /* Delete old macros. */
36575 rs6000_target_modify_macros_ptr (false,
36576 prev_flags & diff_flags,
36577 prev_bumask & diff_bumask);
36579 /* Define new macros. */
36580 rs6000_target_modify_macros_ptr (true,
36581 cur_flags & diff_flags,
36582 cur_bumask & diff_bumask);
36586 return true;
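/* Editorial note: only the bits that actually changed reach
   rs6000_target_modify_macros_ptr -- first prev & diff to undefine
   macros for options being turned off, then cur & diff to define
   macros for options being turned on.  For example, a pragma that
   enables VSX on top of plain AltiVec puts OPTION_MASK_VSX into
   diff_flags, so only the VSX-related macros (__VSX__ and friends,
   assuming the usual rs6000-c.c mapping) are redefined.  */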
36590 /* Remember the last target of rs6000_set_current_function. */
36591 static GTY(()) tree rs6000_previous_fndecl;
36593 /* Restore target's globals from NEW_TREE and invalidate the
36594 rs6000_previous_fndecl cache. */
36596 static void
36597 rs6000_activate_target_options (tree new_tree)
36599 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36600 if (TREE_TARGET_GLOBALS (new_tree))
36601 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36602 else if (new_tree == target_option_default_node)
36603 restore_target_globals (&default_target_globals);
36604 else
36605 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36606 rs6000_previous_fndecl = NULL_TREE;
36609 /* Establish appropriate back-end context for processing the function
36610 FNDECL. The argument might be NULL to indicate processing at top
36611 level, outside of any function scope. */
36612 static void
36613 rs6000_set_current_function (tree fndecl)
36615 if (TARGET_DEBUG_TARGET)
36617 fprintf (stderr, "\n==================== rs6000_set_current_function");
36619 if (fndecl)
36620 fprintf (stderr, ", fndecl %s (%p)",
36621 (DECL_NAME (fndecl)
36622 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36623 : "<unknown>"), (void *)fndecl);
36625 if (rs6000_previous_fndecl)
36626 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36628 fprintf (stderr, "\n");
36631 /* Only change the context if the function changes. This hook is called
36632 several times in the course of compiling a function, and we don't want to
36633 slow things down too much or call target_reinit when it isn't safe. */
36634 if (fndecl == rs6000_previous_fndecl)
36635 return;
36637 tree old_tree;
36638 if (rs6000_previous_fndecl == NULL_TREE)
36639 old_tree = target_option_current_node;
36640 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36641 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36642 else
36643 old_tree = target_option_default_node;
36645 tree new_tree;
36646 if (fndecl == NULL_TREE)
36648 if (old_tree != target_option_current_node)
36649 new_tree = target_option_current_node;
36650 else
36651 new_tree = NULL_TREE;
36653 else
36655 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36656 if (new_tree == NULL_TREE)
36657 new_tree = target_option_default_node;
36660 if (TARGET_DEBUG_TARGET)
36662 if (new_tree)
36664 fprintf (stderr, "\nnew fndecl target specific options:\n");
36665 debug_tree (new_tree);
36668 if (old_tree)
36670 fprintf (stderr, "\nold fndecl target specific options:\n");
36671 debug_tree (old_tree);
36674 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36675 fprintf (stderr, "--------------------\n");
36678 if (new_tree && old_tree != new_tree)
36679 rs6000_activate_target_options (new_tree);
36681 if (fndecl)
36682 rs6000_previous_fndecl = fndecl;
36686 /* Save the current options */
36688 static void
36689 rs6000_function_specific_save (struct cl_target_option *ptr,
36690 struct gcc_options *opts)
36692 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36693 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36696 /* Restore the current options */
36698 static void
36699 rs6000_function_specific_restore (struct gcc_options *opts,
36700 struct cl_target_option *ptr)
36703 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36704 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36705 (void) rs6000_option_override_internal (false);
36708 /* Print the current options */
36710 static void
36711 rs6000_function_specific_print (FILE *file, int indent,
36712 struct cl_target_option *ptr)
36714 rs6000_print_isa_options (file, indent, "Isa options set",
36715 ptr->x_rs6000_isa_flags);
36717 rs6000_print_isa_options (file, indent, "Isa options explicit",
36718 ptr->x_rs6000_isa_flags_explicit);
36721 /* Helper function to print the current isa or misc options on a line. */
36723 static void
36724 rs6000_print_options_internal (FILE *file,
36725 int indent,
36726 const char *string,
36727 HOST_WIDE_INT flags,
36728 const char *prefix,
36729 const struct rs6000_opt_mask *opts,
36730 size_t num_elements)
36732 size_t i;
36733 size_t start_column = 0;
36734 size_t cur_column;
36735 size_t max_column = 120;
36736 size_t prefix_len = strlen (prefix);
36737 size_t comma_len = 0;
36738 const char *comma = "";
36740 if (indent)
36741 start_column += fprintf (file, "%*s", indent, "");
36743 if (!flags)
36745 fprintf (file, DEBUG_FMT_S, string, "<none>");
36746 return;
36749 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36751 /* Print the various mask options. */
36752 cur_column = start_column;
36753 for (i = 0; i < num_elements; i++)
36755 bool invert = opts[i].invert;
36756 const char *name = opts[i].name;
36757 const char *no_str = "";
36758 HOST_WIDE_INT mask = opts[i].mask;
36759 size_t len = comma_len + prefix_len + strlen (name);
36761 if (!invert)
36763 if ((flags & mask) == 0)
36765 no_str = "no-";
36766 len += sizeof ("no-") - 1;
36769 flags &= ~mask;
36772 else
36774 if ((flags & mask) != 0)
36776 no_str = "no-";
36777 len += sizeof ("no-") - 1;
36780 flags |= mask;
36783 cur_column += len;
36784 if (cur_column > max_column)
36786 fprintf (file, ", \\\n%*s", (int)start_column, "");
36787 cur_column = start_column + len;
36788 comma = "";
36791 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36792 comma = ", ";
36793 comma_len = sizeof (", ") - 1;
36796 fputs ("\n", file);
36799 /* Helper function to print the current isa options on a line. */
36801 static void
36802 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36803 HOST_WIDE_INT flags)
36805 rs6000_print_options_internal (file, indent, string, flags, "-m",
36806 &rs6000_opt_masks[0],
36807 ARRAY_SIZE (rs6000_opt_masks));
36810 static void
36811 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36812 HOST_WIDE_INT flags)
36814 rs6000_print_options_internal (file, indent, string, flags, "",
36815 &rs6000_builtin_mask_names[0],
36816 ARRAY_SIZE (rs6000_builtin_mask_names));
36819 /* If the user used -mno-vsx, we need turn off all of the implicit ISA 2.06,
36820 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
36821 -mupper-regs-df, etc.).
36823 If the user used -mno-power8-vector, we need to turn off all of the implicit
36824 ISA 2.07 and 3.0 options that relate to the vector unit.
36826 If the user used -mno-power9-vector, we need to turn off all of the implicit
36827 ISA 3.0 options that relate to the vector unit.
36829 This function does not handle explicit options such as the user specifying
36830 -mdirect-move. These are handled in rs6000_option_override_internal, and
36831 the appropriate error is given if needed.
36833 We return a mask of all of the implicit options that should not be enabled
36834 by default. */
36836 static HOST_WIDE_INT
36837 rs6000_disable_incompatible_switches (void)
36839 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
36840 size_t i, j;
36842 static const struct {
36843 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
36844 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
36845 const char *const name; /* name of the switch. */
36846 } flags[] = {
36847 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
36848 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
36849 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
36852 for (i = 0; i < ARRAY_SIZE (flags); i++)
36854 HOST_WIDE_INT no_flag = flags[i].no_flag;
36856 if ((rs6000_isa_flags & no_flag) == 0
36857 && (rs6000_isa_flags_explicit & no_flag) != 0)
36859 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
36860 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
36861 & rs6000_isa_flags
36862 & dep_flags);
36864 if (set_flags)
36866 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
36867 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
36869 set_flags &= ~rs6000_opt_masks[j].mask;
36870 error ("%<-mno-%s%> turns off %<-m%s%>",
36871 flags[i].name,
36872 rs6000_opt_masks[j].name);
36875 gcc_assert (!set_flags);
36878 rs6000_isa_flags &= ~dep_flags;
36879 ignore_masks |= no_flag | dep_flags;
36883 return ignore_masks;
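/* For example, -mcpu=power9 -mno-vsx leaves OPTION_MASK_VSX explicitly
   cleared, so every dependent flag in OTHER_VSX_VECTOR_MASKS is removed
   from rs6000_isa_flags and folded into the returned ignore_masks.  If one
   of those dependent flags was itself given explicitly (say
   -mpower8-vector), the loop above also reports the conflict as
   "-mno-vsx turns off -mpower8-vector".  */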
36887 /* Helper function for printing the function name when debugging. */
36889 static const char *
36890 get_decl_name (tree fn)
36892 tree name;
36894 if (!fn)
36895 return "<null>";
36897 name = DECL_NAME (fn);
36898 if (!name)
36899 return "<no-name>";
36901 return IDENTIFIER_POINTER (name);
36904 /* Return the clone id of the target we are compiling code for in a target
36905 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
36906 the priority list for the target clones (ordered from lowest to
36907 highest). */
36909 static int
36910 rs6000_clone_priority (tree fndecl)
36912 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36913 HOST_WIDE_INT isa_masks;
36914 int ret = CLONE_DEFAULT;
36915 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
36916 const char *attrs_str = NULL;
36918 attrs = TREE_VALUE (TREE_VALUE (attrs));
36919 attrs_str = TREE_STRING_POINTER (attrs);
36921 /* Return priority zero for default function. Return the ISA needed for the
36922 function if it is not the default. */
36923 if (strcmp (attrs_str, "default") != 0)
36925 if (fn_opts == NULL_TREE)
36926 fn_opts = target_option_default_node;
36928 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
36929 isa_masks = rs6000_isa_flags;
36930 else
36931 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
36933 for (ret = CLONE_MAX - 1; ret != 0; ret--)
36934 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
36935 break;
36938 if (TARGET_DEBUG_TARGET)
36939 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
36940 get_decl_name (fndecl), ret);
36942 return ret;
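/* As an example, a function declared with

     __attribute__ ((target_clones ("cpu=power9", "cpu=power8", "default")))
     void foo (void);

   yields CLONE_DEFAULT (0) for the "default" clone, while each non-default
   clone returns the highest-priority rs6000_clone_map entry whose ISA mask
   is enabled by that clone's target flags (the declaration is
   illustrative).  */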
36945 /* This compares the priority of target features in function DECL1 and DECL2.
36946 It returns positive value if DECL1 is higher priority, negative value if
36947 DECL2 is higher priority and 0 if they are the same. Note, priorities are
36948 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
36950 static int
36951 rs6000_compare_version_priority (tree decl1, tree decl2)
36953 int priority1 = rs6000_clone_priority (decl1);
36954 int priority2 = rs6000_clone_priority (decl2);
36955 int ret = priority1 - priority2;
36957 if (TARGET_DEBUG_TARGET)
36958 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
36959 get_decl_name (decl1), get_decl_name (decl2), ret);
36961 return ret;
36964 /* Make a dispatcher declaration for the multi-versioned function DECL.
36965 Calls to DECL function will be replaced with calls to the dispatcher
36966 by the front-end. Returns the decl of the dispatcher function. */
36968 static tree
36969 rs6000_get_function_versions_dispatcher (void *decl)
36971 tree fn = (tree) decl;
36972 struct cgraph_node *node = NULL;
36973 struct cgraph_node *default_node = NULL;
36974 struct cgraph_function_version_info *node_v = NULL;
36975 struct cgraph_function_version_info *first_v = NULL;
36977 tree dispatch_decl = NULL;
36979 struct cgraph_function_version_info *default_version_info = NULL;
36980 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
36982 if (TARGET_DEBUG_TARGET)
36983 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
36984 get_decl_name (fn));
36986 node = cgraph_node::get (fn);
36987 gcc_assert (node != NULL);
36989 node_v = node->function_version ();
36990 gcc_assert (node_v != NULL);
36992 if (node_v->dispatcher_resolver != NULL)
36993 return node_v->dispatcher_resolver;
36995 /* Find the default version and make it the first node. */
36996 first_v = node_v;
36997 /* Go to the beginning of the chain. */
36998 while (first_v->prev != NULL)
36999 first_v = first_v->prev;
37001 default_version_info = first_v;
37002 while (default_version_info != NULL)
37004 const tree decl2 = default_version_info->this_node->decl;
37005 if (is_function_default_version (decl2))
37006 break;
37007 default_version_info = default_version_info->next;
37010 /* If there is no default node, just return NULL. */
37011 if (default_version_info == NULL)
37012 return NULL;
37014 /* Make default info the first node. */
37015 if (first_v != default_version_info)
37017 default_version_info->prev->next = default_version_info->next;
37018 if (default_version_info->next)
37019 default_version_info->next->prev = default_version_info->prev;
37020 first_v->prev = default_version_info;
37021 default_version_info->next = first_v;
37022 default_version_info->prev = NULL;
37025 default_node = default_version_info->this_node;
37027 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37028 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37029 "target_clones attribute needs GLIBC (2.23 and newer) that "
37030 "exports hardware capability bits");
37031 #else
37033 if (targetm.has_ifunc_p ())
37035 struct cgraph_function_version_info *it_v = NULL;
37036 struct cgraph_node *dispatcher_node = NULL;
37037 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37039 /* Right now, the dispatching is done via ifunc. */
37040 dispatch_decl = make_dispatcher_decl (default_node->decl);
37042 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37043 gcc_assert (dispatcher_node != NULL);
37044 dispatcher_node->dispatcher_function = 1;
37045 dispatcher_version_info
37046 = dispatcher_node->insert_new_function_version ();
37047 dispatcher_version_info->next = default_version_info;
37048 dispatcher_node->definition = 1;
37050 /* Set the dispatcher for all the versions. */
37051 it_v = default_version_info;
37052 while (it_v != NULL)
37054 it_v->dispatcher_resolver = dispatch_decl;
37055 it_v = it_v->next;
37058 else
37060 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37061 "multiversioning needs ifunc which is not supported "
37062 "on this target");
37064 #endif
37066 return dispatch_decl;
37069 /* Make the resolver function decl to dispatch the versions of a multi-
37070 versioned function, DEFAULT_DECL. Create an empty basic block in the
37071 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37072 function. */
37074 static tree
37075 make_resolver_func (const tree default_decl,
37076 const tree dispatch_decl,
37077 basic_block *empty_bb)
37079 /* Make the resolver function static. The resolver function returns
37080 void *. */
37081 tree decl_name = clone_function_name (default_decl, "resolver");
37082 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37083 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37084 tree decl = build_fn_decl (resolver_name, type);
37085 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37087 DECL_NAME (decl) = decl_name;
37088 TREE_USED (decl) = 1;
37089 DECL_ARTIFICIAL (decl) = 1;
37090 DECL_IGNORED_P (decl) = 0;
37091 TREE_PUBLIC (decl) = 0;
37092 DECL_UNINLINABLE (decl) = 1;
37094 /* Resolver is not external, body is generated. */
37095 DECL_EXTERNAL (decl) = 0;
37096 DECL_EXTERNAL (dispatch_decl) = 0;
37098 DECL_CONTEXT (decl) = NULL_TREE;
37099 DECL_INITIAL (decl) = make_node (BLOCK);
37100 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37102 /* Build result decl and add to function_decl. */
37103 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37104 DECL_ARTIFICIAL (t) = 1;
37105 DECL_IGNORED_P (t) = 1;
37106 DECL_RESULT (decl) = t;
37108 gimplify_function_tree (decl);
37109 push_cfun (DECL_STRUCT_FUNCTION (decl));
37110 *empty_bb = init_lowered_empty_function (decl, false,
37111 profile_count::uninitialized ());
37113 cgraph_node::add_new_function (decl, true);
37114 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37116 pop_cfun ();
37118 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37119 DECL_ATTRIBUTES (dispatch_decl)
37120 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37122 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37124 return decl;
37127 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37128 return a pointer to VERSION_DECL if we are running on a machine that
37129 supports the hardware architecture bits indexed by CLONE_ISA. This function will
37130 be called during version dispatch to decide which function version to
37131 execute. It returns the basic block at the end, to which more conditions
37132 can be added. */
37134 static basic_block
37135 add_condition_to_bb (tree function_decl, tree version_decl,
37136 int clone_isa, basic_block new_bb)
37138 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37140 gcc_assert (new_bb != NULL);
37141 gimple_seq gseq = bb_seq (new_bb);
37144 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37145 build_fold_addr_expr (version_decl));
37146 tree result_var = create_tmp_var (ptr_type_node);
37147 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37148 gimple *return_stmt = gimple_build_return (result_var);
37150 if (clone_isa == CLONE_DEFAULT)
37152 gimple_seq_add_stmt (&gseq, convert_stmt);
37153 gimple_seq_add_stmt (&gseq, return_stmt);
37154 set_bb_seq (new_bb, gseq);
37155 gimple_set_bb (convert_stmt, new_bb);
37156 gimple_set_bb (return_stmt, new_bb);
37157 pop_cfun ();
37158 return new_bb;
37161 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37162 tree cond_var = create_tmp_var (bool_int_type_node);
37163 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37164 const char *arg_str = rs6000_clone_map[clone_isa].name;
37165 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37166 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37167 gimple_call_set_lhs (call_cond_stmt, cond_var);
37169 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37170 gimple_set_bb (call_cond_stmt, new_bb);
37171 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37173 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37174 NULL_TREE, NULL_TREE);
37175 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37176 gimple_set_bb (if_else_stmt, new_bb);
37177 gimple_seq_add_stmt (&gseq, if_else_stmt);
37179 gimple_seq_add_stmt (&gseq, convert_stmt);
37180 gimple_seq_add_stmt (&gseq, return_stmt);
37181 set_bb_seq (new_bb, gseq);
37183 basic_block bb1 = new_bb;
37184 edge e12 = split_block (bb1, if_else_stmt);
37185 basic_block bb2 = e12->dest;
37186 e12->flags &= ~EDGE_FALLTHRU;
37187 e12->flags |= EDGE_TRUE_VALUE;
37189 edge e23 = split_block (bb2, return_stmt);
37190 gimple_set_bb (convert_stmt, bb2);
37191 gimple_set_bb (return_stmt, bb2);
37193 basic_block bb3 = e23->dest;
37194 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37196 remove_edge (e23);
37197 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37199 pop_cfun ();
37200 return bb3;
37203 /* This function generates the dispatch function for multi-versioned functions.
37204 DISPATCH_DECL is the function which will contain the dispatch logic.
37205 FNDECLS is the vector of candidate function decls, passed as a void pointer.
37206 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37207 code is generated. */
37209 static int
37210 dispatch_function_versions (tree dispatch_decl,
37211 void *fndecls_p,
37212 basic_block *empty_bb)
37214 int ix;
37215 tree ele;
37216 vec<tree> *fndecls;
37217 tree clones[CLONE_MAX];
37219 if (TARGET_DEBUG_TARGET)
37220 fputs ("dispatch_function_versions, top\n", stderr);
37222 gcc_assert (dispatch_decl != NULL
37223 && fndecls_p != NULL
37224 && empty_bb != NULL);
37226 /* fndecls_p is actually a vector. */
37227 fndecls = static_cast<vec<tree> *> (fndecls_p);
37229 /* At least one more version other than the default. */
37230 gcc_assert (fndecls->length () >= 2);
37232 /* The first version in the vector is the default decl. */
37233 memset ((void *) clones, '\0', sizeof (clones));
37234 clones[CLONE_DEFAULT] = (*fndecls)[0];
37236 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37237 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37238 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37239 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37240 to insert the code here to do the call. */
37242 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37244 int priority = rs6000_clone_priority (ele);
37245 if (!clones[priority])
37246 clones[priority] = ele;
37249 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37250 if (clones[ix])
37252 if (TARGET_DEBUG_TARGET)
37253 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37254 ix, get_decl_name (clones[ix]));
37256 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37257 *empty_bb);
37260 return 0;
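/* The dispatcher built here behaves like the following sketch, where the
   "arch_*" strings come from rs6000_clone_map and the identifiers are
   illustrative:

     void *
     foo_resolver (void)
     {
       if (__builtin_cpu_supports ("arch_3_00"))
	 return foo_power9;
       if (__builtin_cpu_supports ("arch_2_07"))
	 return foo_power8;
       return foo_default;
     }  */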
37263 /* Generate the dispatching code body to dispatch multi-versioned function
37264 DECL. The target hook is called to process the "target" attributes and
37265 provide the code to dispatch the right function at run-time. NODE points
37266 to the dispatcher decl whose body will be created. */
37268 static tree
37269 rs6000_generate_version_dispatcher_body (void *node_p)
37271 tree resolver;
37272 basic_block empty_bb;
37273 struct cgraph_node *node = (cgraph_node *) node_p;
37274 struct cgraph_function_version_info *ninfo = node->function_version ();
37276 if (ninfo->dispatcher_resolver)
37277 return ninfo->dispatcher_resolver;
37279 /* node is going to be an alias, so remove the finalized bit. */
37280 node->definition = false;
37282 /* The first version in the chain corresponds to the default version. */
37283 ninfo->dispatcher_resolver = resolver
37284 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37286 if (TARGET_DEBUG_TARGET)
37287 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37288 get_decl_name (resolver));
37290 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37291 auto_vec<tree, 2> fn_ver_vec;
37293 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37294 vinfo;
37295 vinfo = vinfo->next)
37297 struct cgraph_node *version = vinfo->this_node;
37298 /* Check for virtual functions here again, as by this time it should
37299 have been determined if this function needs a vtable index or
37300 not. This happens for methods in derived classes that override
37301 virtual methods in base classes but are not explicitly marked as
37302 virtual. */
37303 if (DECL_VINDEX (version->decl))
37304 sorry ("Virtual function multiversioning not supported");
37306 fn_ver_vec.safe_push (version->decl);
37309 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37310 cgraph_edge::rebuild_edges ();
37311 pop_cfun ();
37312 return resolver;
37316 /* Hook to determine if one function can safely inline another. */
37318 static bool
37319 rs6000_can_inline_p (tree caller, tree callee)
37321 bool ret = false;
37322 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37323 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37325 /* If callee has no option attributes, then it is ok to inline. */
37326 if (!callee_tree)
37327 ret = true;
37329 /* If caller has no option attributes, but callee does then it is not ok to
37330 inline. */
37331 else if (!caller_tree)
37332 ret = false;
37334 else
37336 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37337 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37339 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37340 can inline an altivec function but a non-vsx function can't inline a
37341 vsx function. */
37342 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37343 == callee_opts->x_rs6000_isa_flags)
37344 ret = true;
37347 if (TARGET_DEBUG_TARGET)
37348 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37349 get_decl_name (caller), get_decl_name (callee),
37350 (ret ? "can" : "cannot"));
37352 return ret;
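/* For example, with

     __attribute__ ((target ("vsx"))) void caller (void);
     __attribute__ ((target ("altivec"))) void callee (void);

   callee's ISA flags are a subset of caller's (VSX implies AltiVec), so
   the inline is allowed; swapping the two attributes makes the subset
   test fail and blocks the inline (declarations are illustrative).  */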
37355 /* Allocate a stack temp and fix up the address so it meets the particular
37356 memory requirements (either offsettable or REG+REG addressing). */
37358 rtx
37359 rs6000_allocate_stack_temp (machine_mode mode,
37360 bool offsettable_p,
37361 bool reg_reg_p)
37363 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37364 rtx addr = XEXP (stack, 0);
37365 int strict_p = reload_completed;
37367 if (!legitimate_indirect_address_p (addr, strict_p))
37369 if (offsettable_p
37370 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37371 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37373 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37374 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37377 return stack;
37380 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
37381 to such a form to deal with memory reference instructions like STFIWX that
37382 only take reg+reg addressing. */
37384 rtx
37385 rs6000_address_for_fpconvert (rtx x)
37387 rtx addr;
37389 gcc_assert (MEM_P (x));
37390 addr = XEXP (x, 0);
37391 if (! legitimate_indirect_address_p (addr, reload_completed)
37392 && ! legitimate_indexed_address_p (addr, reload_completed))
37394 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37396 rtx reg = XEXP (addr, 0);
37397 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37398 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37399 gcc_assert (REG_P (reg));
37400 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37401 addr = reg;
37403 else if (GET_CODE (addr) == PRE_MODIFY)
37405 rtx reg = XEXP (addr, 0);
37406 rtx expr = XEXP (addr, 1);
37407 gcc_assert (REG_P (reg));
37408 gcc_assert (GET_CODE (expr) == PLUS);
37409 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37410 addr = reg;
37413 x = replace_equiv_address (x, copy_addr_to_reg (addr));
37416 return x;
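/* As a sketch, a pre-increment reference such as
     (mem:DF (pre_inc (reg:DI r9)))
   is handled above by emitting "addi r9,r9,8" and then rebuilding the MEM
   to address through a plain register, giving a reg-indirect form that
   reg+reg-only instructions like STFIWX accept (the register is
   illustrative).  */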
37419 /* Given a memory reference, if it is not in the form for altivec memory
37420 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
37421 convert to the altivec format. */
37423 rtx
37424 rs6000_address_for_altivec (rtx x)
37426 gcc_assert (MEM_P (x));
37427 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
37429 rtx addr = XEXP (x, 0);
37431 if (!legitimate_indexed_address_p (addr, reload_completed)
37432 && !legitimate_indirect_address_p (addr, reload_completed))
37433 addr = copy_to_mode_reg (Pmode, addr);
37435 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
37436 x = change_address (x, GET_MODE (x), addr);
37439 return x;
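/* For example, an address like (plus (reg r3) (const_int 20)) is neither
   indexed nor indirect, so it is first copied into a register and then
   wrapped as (and (reg rN) (const_int -16)), matching the way the AltiVec
   lvx/stvx instructions ignore the low four address bits (registers are
   illustrative).  */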
37442 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37444 On the RS/6000, all integer constants are acceptable, most won't be valid
37445 for particular insns, though. Only easy FP constants are acceptable. */
37447 static bool
37448 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37450 if (TARGET_ELF && tls_referenced_p (x))
37451 return false;
37453 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
37454 || GET_MODE (x) == VOIDmode
37455 || (TARGET_POWERPC64 && mode == DImode)
37456 || easy_fp_constant (x, mode)
37457 || easy_vector_constant (x, mode));
37461 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37463 static bool
37464 chain_already_loaded (rtx_insn *last)
37466 for (; last != NULL; last = PREV_INSN (last))
37468 if (NONJUMP_INSN_P (last))
37470 rtx patt = PATTERN (last);
37472 if (GET_CODE (patt) == SET)
37474 rtx lhs = XEXP (patt, 0);
37476 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37477 return true;
37481 return false;
37484 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37486 void
37487 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37489 const bool direct_call_p
37490 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
37491 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37492 rtx toc_load = NULL_RTX;
37493 rtx toc_restore = NULL_RTX;
37494 rtx func_addr;
37495 rtx abi_reg = NULL_RTX;
37496 rtx call[4];
37497 int n_call;
37498 rtx insn;
37500 /* Handle longcall attributes. */
37501 if (INTVAL (cookie) & CALL_LONG)
37502 func_desc = rs6000_longcall_ref (func_desc);
37504 /* Handle indirect calls. */
37505 if (GET_CODE (func_desc) != SYMBOL_REF
37506 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
37508 /* Save the TOC into its reserved slot before the call,
37509 and prepare to restore it after the call. */
37510 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37511 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37512 rtx stack_toc_mem = gen_frame_mem (Pmode,
37513 gen_rtx_PLUS (Pmode, stack_ptr,
37514 stack_toc_offset));
37515 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37516 gen_rtvec (1, stack_toc_offset),
37517 UNSPEC_TOCSLOT);
37518 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37520 /* Can we optimize saving the TOC in the prologue or
37521 do we need to do it at every call? */
37522 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37523 cfun->machine->save_toc_in_prologue = true;
37524 else
37526 MEM_VOLATILE_P (stack_toc_mem) = 1;
37527 emit_move_insn (stack_toc_mem, toc_reg);
37530 if (DEFAULT_ABI == ABI_ELFv2)
37532 /* A function pointer in the ELFv2 ABI is just a plain address, but
37533 the ABI requires it to be loaded into r12 before the call. */
37534 func_addr = gen_rtx_REG (Pmode, 12);
37535 emit_move_insn (func_addr, func_desc);
37536 abi_reg = func_addr;
37538 else
37540 /* A function pointer under AIX is a pointer to a data area whose
37541 first word contains the actual address of the function, whose
37542 second word contains a pointer to its TOC, and whose third word
37543 contains a value to place in the static chain register (r11).
37544 Note that if we load the static chain, our "trampoline" need
37545 not have any executable code. */
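/* Concretely, the descriptor words read below sit at offsets 0,
   GET_MODE_SIZE (Pmode) and 2 * GET_MODE_SIZE (Pmode):
     0(desc)   code address of the function
     8(desc)   TOC pointer for the callee	(64-bit offsets shown)
     16(desc)  static chain value for r11  */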
37547 /* Load up address of the actual function. */
37548 func_desc = force_reg (Pmode, func_desc);
37549 func_addr = gen_reg_rtx (Pmode);
37550 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
37552 /* Prepare to load the TOC of the called function. Note that the
37553 TOC load must happen immediately before the actual call so
37554 that unwinding the TOC registers works correctly. See the
37555 comment in frob_update_context. */
37556 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37557 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37558 gen_rtx_PLUS (Pmode, func_desc,
37559 func_toc_offset));
37560 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37562 /* If we have a static chain, load it up. But, if the call was
37563 originally direct, the 3rd word has not been written since no
37564 trampoline has been built, so we ought not to load it, lest we
37565 overwrite a static chain value. */
37566 if (!direct_call_p
37567 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37568 && !chain_already_loaded (get_current_sequence ()->next->last))
37570 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37571 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37572 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37573 gen_rtx_PLUS (Pmode, func_desc,
37574 func_sc_offset));
37575 emit_move_insn (sc_reg, func_sc_mem);
37576 abi_reg = sc_reg;
37580 else
37582 /* Direct calls use the TOC: for local calls, the callee will
37583 assume the TOC register is set; for non-local calls, the
37584 PLT stub needs the TOC register. */
37585 abi_reg = toc_reg;
37586 func_addr = func_desc;
37589 /* Create the call. */
37590 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
37591 if (value != NULL_RTX)
37592 call[0] = gen_rtx_SET (value, call[0]);
37593 n_call = 1;
37595 if (toc_load)
37596 call[n_call++] = toc_load;
37597 if (toc_restore)
37598 call[n_call++] = toc_restore;
37600 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
37602 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37603 insn = emit_call_insn (insn);
37605 /* Mention all registers defined by the ABI to hold information
37606 as uses in CALL_INSN_FUNCTION_USAGE. */
37607 if (abi_reg)
37608 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37611 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37613 void
37614 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37616 rtx call[2];
37617 rtx insn;
37619 gcc_assert (INTVAL (cookie) == 0);
37621 /* Create the call. */
37622 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
37623 if (value != NULL_RTX)
37624 call[0] = gen_rtx_SET (value, call[0]);
37626 call[1] = simple_return_rtx;
37628 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37629 insn = emit_call_insn (insn);
37631 /* Note use of the TOC register. */
37632 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37635 /* Return whether we need to always update the saved TOC pointer when we update
37636 the stack pointer. */
37638 static bool
37639 rs6000_save_toc_in_prologue_p (void)
37641 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
37644 #ifdef HAVE_GAS_HIDDEN
37645 # define USE_HIDDEN_LINKONCE 1
37646 #else
37647 # define USE_HIDDEN_LINKONCE 0
37648 #endif
37650 /* Fills in the label name that should be used for a 476 link stack thunk. */
37652 void
37653 get_ppc476_thunk_name (char name[32])
37655 gcc_assert (TARGET_LINK_STACK);
37657 if (USE_HIDDEN_LINKONCE)
37658 sprintf (name, "__ppc476.get_thunk");
37659 else
37660 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
37663 /* This function emits the simple thunk routine that is used to preserve
37664 the link stack on the 476 cpu. */
37666 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
37667 static void
37668 rs6000_code_end (void)
37670 char name[32];
37671 tree decl;
37673 if (!TARGET_LINK_STACK)
37674 return;
37676 get_ppc476_thunk_name (name);
37678 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
37679 build_function_type_list (void_type_node, NULL_TREE));
37680 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
37681 NULL_TREE, void_type_node);
37682 TREE_PUBLIC (decl) = 1;
37683 TREE_STATIC (decl) = 1;
37685 #if RS6000_WEAK
37686 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
37688 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
37689 targetm.asm_out.unique_section (decl, 0);
37690 switch_to_section (get_named_section (decl, NULL, 0));
37691 DECL_WEAK (decl) = 1;
37692 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
37693 targetm.asm_out.globalize_label (asm_out_file, name);
37694 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
37695 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
37697 else
37698 #endif
37700 switch_to_section (text_section);
37701 ASM_OUTPUT_LABEL (asm_out_file, name);
37704 DECL_INITIAL (decl) = make_node (BLOCK);
37705 current_function_decl = decl;
37706 allocate_struct_function (decl, false);
37707 init_function_start (decl);
37708 first_function_block_is_cold = false;
37709 /* Make sure unwind info is emitted for the thunk if needed. */
37710 final_start_function (emit_barrier (), asm_out_file, 1);
37712 fputs ("\tblr\n", asm_out_file);
37714 final_end_function ();
37715 init_insn_lengths ();
37716 free_after_compilation (cfun);
37717 set_cfun (NULL);
37718 current_function_decl = NULL;
37721 /* Add r30 to the hard reg set if the prologue sets it up and it is not
37722 pic_offset_table_rtx; likewise r12 when it carries the split-stack arg pointer. */
37724 static void
37725 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
37727 if (!TARGET_SINGLE_PIC_BASE
37728 && TARGET_TOC
37729 && TARGET_MINIMAL_TOC
37730 && !constant_pool_empty_p ())
37731 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
37732 if (cfun->machine->split_stack_argp_used)
37733 add_to_hard_reg_set (&set->set, Pmode, 12);
37737 /* Helper function for rs6000_split_logical to emit a logical instruction after
37738 splitting the operation into single GPR registers.
37740 DEST is the destination register.
37741 OP1 and OP2 are the input source registers.
37742 CODE is the base operation (AND, IOR, XOR, NOT).
37743 MODE is the machine mode.
37744 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37745 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37746 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37748 static void
37749 rs6000_split_logical_inner (rtx dest,
37750 rtx op1,
37751 rtx op2,
37752 enum rtx_code code,
37753 machine_mode mode,
37754 bool complement_final_p,
37755 bool complement_op1_p,
37756 bool complement_op2_p)
37758 rtx bool_rtx;
37760 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37761 if (op2 && GET_CODE (op2) == CONST_INT
37762 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
37763 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37765 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
37766 HOST_WIDE_INT value = INTVAL (op2) & mask;
37768 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37769 if (code == AND)
37771 if (value == 0)
37773 emit_insn (gen_rtx_SET (dest, const0_rtx));
37774 return;
37777 else if (value == mask)
37779 if (!rtx_equal_p (dest, op1))
37780 emit_insn (gen_rtx_SET (dest, op1));
37781 return;
37785 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
37786 into separate ORI/ORIS or XORI/XORIS instructions. */
37787 else if (code == IOR || code == XOR)
37789 if (value == 0)
37791 if (!rtx_equal_p (dest, op1))
37792 emit_insn (gen_rtx_SET (dest, op1));
37793 return;
37798 if (code == AND && mode == SImode
37799 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37801 emit_insn (gen_andsi3 (dest, op1, op2));
37802 return;
37805 if (complement_op1_p)
37806 op1 = gen_rtx_NOT (mode, op1);
37808 if (complement_op2_p)
37809 op2 = gen_rtx_NOT (mode, op2);
37811 /* For canonical RTL, if only one arm is inverted it is the first. */
37812 if (!complement_op1_p && complement_op2_p)
37813 std::swap (op1, op2);
37815 bool_rtx = ((code == NOT)
37816 ? gen_rtx_NOT (mode, op1)
37817 : gen_rtx_fmt_ee (code, mode, op1, op2));
37819 if (complement_final_p)
37820 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
37822 emit_insn (gen_rtx_SET (dest, bool_rtx));
37825 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
37826 operations are split immediately during RTL generation to allow for more
37827 optimizations of the AND/IOR/XOR.
37829 OPERANDS is an array containing the destination and two input operands.
37830 CODE is the base operation (AND, IOR, XOR, NOT).
37832 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37833 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37834 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37838 static void
37839 rs6000_split_logical_di (rtx operands[3],
37840 enum rtx_code code,
37841 bool complement_final_p,
37842 bool complement_op1_p,
37843 bool complement_op2_p)
37845 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
37846 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
37847 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
37848 enum hi_lo { hi = 0, lo = 1 };
37849 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
37850 size_t i;
37852 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
37853 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
37854 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
37855 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
37857 if (code == NOT)
37858 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
37859 else
37861 if (GET_CODE (operands[2]) != CONST_INT)
37863 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
37864 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
37866 else
37868 HOST_WIDE_INT value = INTVAL (operands[2]);
37869 HOST_WIDE_INT value_hi_lo[2];
37871 gcc_assert (!complement_final_p);
37872 gcc_assert (!complement_op1_p);
37873 gcc_assert (!complement_op2_p);
37875 value_hi_lo[hi] = value >> 32;
37876 value_hi_lo[lo] = value & lower_32bits;
37878 for (i = 0; i < 2; i++)
37880 HOST_WIDE_INT sub_value = value_hi_lo[i];
37882 if (sub_value & sign_bit)
37883 sub_value |= upper_32bits;
37885 op2_hi_lo[i] = GEN_INT (sub_value);
37887 /* If this is an AND instruction, check to see if we need to load
37888 the value in a register. */
37889 if (code == AND && sub_value != -1 && sub_value != 0
37890 && !and_operand (op2_hi_lo[i], SImode))
37891 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
37896 for (i = 0; i < 2; i++)
37898 /* Split large IOR/XOR operations. */
37899 if ((code == IOR || code == XOR)
37900 && GET_CODE (op2_hi_lo[i]) == CONST_INT
37901 && !complement_final_p
37902 && !complement_op1_p
37903 && !complement_op2_p
37904 && !logical_const_operand (op2_hi_lo[i], SImode))
37906 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
37907 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
37908 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
37909 rtx tmp = gen_reg_rtx (SImode);
37911 /* Make sure the constant is sign extended. */
37912 if ((hi_16bits & sign_bit) != 0)
37913 hi_16bits |= upper_32bits;
37915 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
37916 code, SImode, false, false, false);
37918 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
37919 code, SImode, false, false, false);
37921 else
37922 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
37923 code, SImode, complement_final_p,
37924 complement_op1_p, complement_op2_p);
37927 return;
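/* As an example, if one 32-bit half of the constant is 0x12345678, it
   fails logical_const_operand for IOR/XOR, so the code above splits that
   half into the equivalent of
     oris rT,r3,0x1234
     ori  r3,rT,0x5678
   using the fresh SImode temporary created for the high 16 bits
   (registers are illustrative).  */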
37930 /* Split the insns that make up boolean operations operating on multiple GPR
37931 registers. The boolean MD patterns ensure that the inputs either are
37932 exactly the same as the output registers, or there is no overlap.
37934 OPERANDS is an array containing the destination and two input operands.
37935 CODE is the base operation (AND, IOR, XOR, NOT).
37936 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37937 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37938 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37940 void
37941 rs6000_split_logical (rtx operands[3],
37942 enum rtx_code code,
37943 bool complement_final_p,
37944 bool complement_op1_p,
37945 bool complement_op2_p)
37947 machine_mode mode = GET_MODE (operands[0]);
37948 machine_mode sub_mode;
37949 rtx op0, op1, op2;
37950 int sub_size, regno0, regno1, nregs, i;
37952 /* If this is DImode, use the specialized version that can run before
37953 register allocation. */
37954 if (mode == DImode && !TARGET_POWERPC64)
37956 rs6000_split_logical_di (operands, code, complement_final_p,
37957 complement_op1_p, complement_op2_p);
37958 return;
37961 op0 = operands[0];
37962 op1 = operands[1];
37963 op2 = (code == NOT) ? NULL_RTX : operands[2];
37964 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
37965 sub_size = GET_MODE_SIZE (sub_mode);
37966 regno0 = REGNO (op0);
37967 regno1 = REGNO (op1);
37969 gcc_assert (reload_completed);
37970 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37971 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37973 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
37974 gcc_assert (nregs > 1);
37976 if (op2 && REG_P (op2))
37977 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
37979 for (i = 0; i < nregs; i++)
37981 int offset = i * sub_size;
37982 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
37983 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
37984 rtx sub_op2 = ((code == NOT)
37985 ? NULL_RTX
37986 : simplify_subreg (sub_mode, op2, mode, offset));
37988 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
37989 complement_final_p, complement_op1_p,
37990 complement_op2_p);
37993 return;
37997 /* Return true if the peephole2 can combine an addis instruction with a
37998 dependent load that uses an offset, so the pair can be fused together on
37999 a power8. */
38001 bool
38002 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38003 rtx addis_value, /* addis value. */
38004 rtx target, /* target register that is loaded. */
38005 rtx mem) /* bottom part of the memory addr. */
38007 rtx addr;
38008 rtx base_reg;
38010 /* Validate arguments. */
38011 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38012 return false;
38014 if (!base_reg_operand (target, GET_MODE (target)))
38015 return false;
38017 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38018 return false;
38020 /* Allow sign/zero extension. */
38021 if (GET_CODE (mem) == ZERO_EXTEND
38022 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38023 mem = XEXP (mem, 0);
38025 if (!MEM_P (mem))
38026 return false;
38028 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38029 return false;
38031 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38032 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38033 return false;
38035 /* Validate that the register used to load the high value is either the
38036 register being loaded, or we can safely replace its use.
38038 This function is only called from the peephole2 pass and we assume that
38039 there are 2 instructions in the peephole (addis and load), so we check
38040 that the target register is not used in the memory address and that the
38041 register holding the addis result is dead after the peephole. */
38042 if (REGNO (addis_reg) != REGNO (target))
38044 if (reg_mentioned_p (target, mem))
38045 return false;
38047 if (!peep2_reg_dead_p (2, addis_reg))
38048 return false;
38050 /* If the target register being loaded is the stack pointer, we must
38051 avoid loading any other value into it, even temporarily. */
38052 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38053 return false;
38056 base_reg = XEXP (addr, 0);
38057 return REGNO (addis_reg) == REGNO (base_reg);
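/* The peephole using this predicate matches a pair along the lines of
     addis r10,r2,.LC0@toc@ha
     lwz   r10,.LC0@toc@l(r10)
   where the addis result is either the loaded register itself or a
   register that dies after the load, so power8 can fuse the two
   instructions (the symbol and registers are illustrative).  */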
38060 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38061 sequence. We adjust the addis register to use the target register. If the
38062 load sign-extends, we adjust the code to do a zero-extending load, plus an
38063 explicit sign extension afterwards, since the fusion only covers
38064 zero-extending loads.
38066 The operands are:
38067 operands[0] register set with addis (to be replaced with target)
38068 operands[1] value set via addis
38069 operands[2] target register being loaded
38070 operands[3] D-form memory reference using operands[0]. */
38072 void
38073 expand_fusion_gpr_load (rtx *operands)
38075 rtx addis_value = operands[1];
38076 rtx target = operands[2];
38077 rtx orig_mem = operands[3];
38078 rtx new_addr, new_mem, orig_addr, offset;
38079 enum rtx_code plus_or_lo_sum;
38080 machine_mode target_mode = GET_MODE (target);
38081 machine_mode extend_mode = target_mode;
38082 machine_mode ptr_mode = Pmode;
38083 enum rtx_code extend = UNKNOWN;
38085 if (GET_CODE (orig_mem) == ZERO_EXTEND
38086 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38088 extend = GET_CODE (orig_mem);
38089 orig_mem = XEXP (orig_mem, 0);
38090 target_mode = GET_MODE (orig_mem);
38093 gcc_assert (MEM_P (orig_mem));
38095 orig_addr = XEXP (orig_mem, 0);
38096 plus_or_lo_sum = GET_CODE (orig_addr);
38097 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38099 offset = XEXP (orig_addr, 1);
38100 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38101 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38103 if (extend != UNKNOWN)
38104 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38106 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38107 UNSPEC_FUSION_GPR);
38108 emit_insn (gen_rtx_SET (target, new_mem));
38110 if (extend == SIGN_EXTEND)
38112 int sub_off = ((BYTES_BIG_ENDIAN)
38113 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38114 : 0);
38115 rtx sign_reg
38116 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38118 emit_insn (gen_rtx_SET (target,
38119 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38122 return;
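/* For a sign-extending load, for instance (sign_extend:DI (mem:HI ...)),
   the rewrite above produces a fused zero-extending load plus an explicit
   sign extension, roughly
     addis r9,r2,sym@toc@ha
     lhz   r9,sym@toc@l(r9)
     extsh r9,r9
   since only the zero-extending forms participate in power8 fusion
   (the symbol and register are illustrative).  */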
38125 /* Emit the addis instruction that will be part of a fused instruction
38126 sequence. */
38128 void
38129 emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
38130 const char *mode_name)
38132 rtx fuse_ops[10];
38133 char insn_template[80];
38134 const char *addis_str = NULL;
38135 const char *comment_str = ASM_COMMENT_START;
38137 if (*comment_str == ' ')
38138 comment_str++;
38140 /* Emit the addis instruction. */
38141 fuse_ops[0] = target;
38142 if (satisfies_constraint_L (addis_value))
38144 fuse_ops[1] = addis_value;
38145 addis_str = "lis %0,%v1";
38148 else if (GET_CODE (addis_value) == PLUS)
38150 rtx op0 = XEXP (addis_value, 0);
38151 rtx op1 = XEXP (addis_value, 1);
38153 if (REG_P (op0) && CONST_INT_P (op1)
38154 && satisfies_constraint_L (op1))
38156 fuse_ops[1] = op0;
38157 fuse_ops[2] = op1;
38158 addis_str = "addis %0,%1,%v2";
38162 else if (GET_CODE (addis_value) == HIGH)
38164 rtx value = XEXP (addis_value, 0);
38165 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38167 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38168 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38169 if (TARGET_ELF)
38170 addis_str = "addis %0,%2,%1@toc@ha";
38172 else if (TARGET_XCOFF)
38173 addis_str = "addis %0,%1@u(%2)";
38175 else
38176 gcc_unreachable ();
38179 else if (GET_CODE (value) == PLUS)
38181 rtx op0 = XEXP (value, 0);
38182 rtx op1 = XEXP (value, 1);
38184 if (GET_CODE (op0) == UNSPEC
38185 && XINT (op0, 1) == UNSPEC_TOCREL
38186 && CONST_INT_P (op1))
38188 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38189 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38190 fuse_ops[3] = op1;
38191 if (TARGET_ELF)
38192 addis_str = "addis %0,%2,%1+%3@toc@ha";
38194 else if (TARGET_XCOFF)
38195 addis_str = "addis %0,%1+%3@u(%2)";
38197 else
38198 gcc_unreachable ();
38202 else if (satisfies_constraint_L (value))
38204 fuse_ops[1] = value;
38205 addis_str = "lis %0,%v1";
38208 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38210 fuse_ops[1] = value;
38211 addis_str = "lis %0,%1@ha";
38215 if (!addis_str)
38216 fatal_insn ("Could not generate addis value for fusion", addis_value);
38218 sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
38219 comment, mode_name);
38220 output_asm_insn (insn_template, fuse_ops);
38223 /* Emit a D-form load or store instruction that is the second instruction
38224 of a fusion sequence. */
38226 void
38227 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
38228 const char *insn_str)
38230 rtx fuse_ops[10];
38231 char insn_template[80];
38233 fuse_ops[0] = load_store_reg;
38234 fuse_ops[1] = addis_reg;
38236 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38238 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38239 fuse_ops[2] = offset;
38240 output_asm_insn (insn_template, fuse_ops);
38243 else if (GET_CODE (offset) == UNSPEC
38244 && XINT (offset, 1) == UNSPEC_TOCREL)
38246 if (TARGET_ELF)
38247 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38249 else if (TARGET_XCOFF)
38250 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38252 else
38253 gcc_unreachable ();
38255 fuse_ops[2] = XVECEXP (offset, 0, 0);
38256 output_asm_insn (insn_template, fuse_ops);
38259 else if (GET_CODE (offset) == PLUS
38260 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38261 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38262 && CONST_INT_P (XEXP (offset, 1)))
38264 rtx tocrel_unspec = XEXP (offset, 0);
38265 if (TARGET_ELF)
38266 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38268 else if (TARGET_XCOFF)
38269 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38271 else
38272 gcc_unreachable ();
38274 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38275 fuse_ops[3] = XEXP (offset, 1);
38276 output_asm_insn (insn_template, fuse_ops);
38279 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38281 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38283 fuse_ops[2] = offset;
38284 output_asm_insn (insn_template, fuse_ops);
38287 else
38288 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38290 return;
38293 /* Wrap a TOC address that can be fused to indicate that special fusion
38294 processing is needed. */
38296 rtx
38297 fusion_wrap_memory_address (rtx old_mem)
38299 rtx old_addr = XEXP (old_mem, 0);
38300 rtvec v = gen_rtvec (1, old_addr);
38301 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
38302 return replace_equiv_address_nv (old_mem, new_addr, false);
38305 /* Given an address, convert it into the addis and load offset parts. Addresses
38306 created during the peephole2 process look like:
38307 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38308 (unspec [(...)] UNSPEC_TOCREL))
38310 Addresses created via toc fusion look like:
38311 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)) */
38313 static void
38314 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38316 rtx hi, lo;
38318 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
38320 lo = XVECEXP (addr, 0, 0);
38321 hi = gen_rtx_HIGH (Pmode, lo);
38323 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38325 hi = XEXP (addr, 0);
38326 lo = XEXP (addr, 1);
38328 else
38329 gcc_unreachable ();
38331 *p_hi = hi;
38332 *p_lo = lo;
38335 /* Return a string to fuse an addis instruction with a gpr load into the
38336 same register that the addis instruction set. The address that is used
38337 is the logical address that was formed during peephole2:
38338 (lo_sum (high) (low-part))
38340 Or the address is the TOC address that is wrapped before register allocation:
38341 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
38343 The code is complicated, so we call output_asm_insn directly, and just
38344 return "". */
38346 const char *
38347 emit_fusion_gpr_load (rtx target, rtx mem)
38349 rtx addis_value;
38350 rtx addr;
38351 rtx load_offset;
38352 const char *load_str = NULL;
38353 const char *mode_name = NULL;
38354 machine_mode mode;
38356 if (GET_CODE (mem) == ZERO_EXTEND)
38357 mem = XEXP (mem, 0);
38359 gcc_assert (REG_P (target) && MEM_P (mem));
38361 addr = XEXP (mem, 0);
38362 fusion_split_address (addr, &addis_value, &load_offset);
38364 /* Now emit the load instruction to the same register. */
38365 mode = GET_MODE (mem);
38366 switch (mode)
38368 case E_QImode:
38369 mode_name = "char";
38370 load_str = "lbz";
38371 break;
38373 case E_HImode:
38374 mode_name = "short";
38375 load_str = "lhz";
38376 break;
38378 case E_SImode:
38379 case E_SFmode:
38380 mode_name = (mode == SFmode) ? "float" : "int";
38381 load_str = "lwz";
38382 break;
38384 case E_DImode:
38385 case E_DFmode:
38386 gcc_assert (TARGET_POWERPC64);
38387 mode_name = (mode == DFmode) ? "double" : "long";
38388 load_str = "ld";
38389 break;
38391 default:
38392 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38395 /* Emit the addis instruction. */
38396 emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);
38398 /* Emit the D-form load instruction. */
38399 emit_fusion_load_store (target, target, load_offset, load_str);
38401 return "";
38405 /* Return true if the peephole2 can combine an addis instruction with a
38406 dependent load or store, so the pair can be fused. This fusion was added
38407 with the ISA 3.0 (power9) hardware. */
38409 bool
38410 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38411 rtx addis_value, /* addis value. */
38412 rtx dest, /* destination (memory or register). */
38413 rtx src) /* source (register or memory). */
38415 rtx addr, mem, offset;
38416 machine_mode mode = GET_MODE (src);
38418 /* Validate arguments. */
38419 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38420 return false;
38422 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38423 return false;
38425 /* Ignore extend operations that are part of the load. */
38426 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
38427 src = XEXP (src, 0);
38429 /* Test for memory<-register or register<-memory. */
38430 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
38432 if (!MEM_P (dest))
38433 return false;
38435 mem = dest;
38438 else if (MEM_P (src))
38440 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
38441 return false;
38443 mem = src;
38446 else
38447 return false;
38449 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38450 if (GET_CODE (addr) == PLUS)
38452 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38453 return false;
38455 return satisfies_constraint_I (XEXP (addr, 1));
38458 else if (GET_CODE (addr) == LO_SUM)
38460 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38461 return false;
38463 offset = XEXP (addr, 1);
38464 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
38465 return small_toc_ref (offset, GET_MODE (offset));
38467 else if (TARGET_ELF && !TARGET_POWERPC64)
38468 return CONSTANT_P (offset);
38471 return false;
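/* Unlike the power8 GPR form, the pair accepted here may use a scratch
   base register and may target FP/vector registers, e.g.
     addis r9,r2,sym@toc@ha
     lfd   f1,sym@toc@l(r9)
   with r9 dead after the load; stores are handled symmetrically (the
   symbol and registers are illustrative).  */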
38474 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38475 load sequence.
38477 The operands are:
38478 operands[0] register set with addis
38479 operands[1] value set via addis
38480 operands[2] target register being loaded
38481 operands[3] D-form memory reference using operands[0].
38483 This is similar to the fusion introduced with power8, except it scales to
38484 both loads/stores and does not require the result register to be the same as
38485 the base register. At the moment, we only do this if the register set
38486 with addis is dead. */
38488 void
38489 expand_fusion_p9_load (rtx *operands)
38491 rtx tmp_reg = operands[0];
38492 rtx addis_value = operands[1];
38493 rtx target = operands[2];
38494 rtx orig_mem = operands[3];
38495 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
38496 enum rtx_code plus_or_lo_sum;
38497 machine_mode target_mode = GET_MODE (target);
38498 machine_mode extend_mode = target_mode;
38499 machine_mode ptr_mode = Pmode;
38500 enum rtx_code extend = UNKNOWN;
38502 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
38504 extend = GET_CODE (orig_mem);
38505 orig_mem = XEXP (orig_mem, 0);
38506 target_mode = GET_MODE (orig_mem);
38509 gcc_assert (MEM_P (orig_mem));
38511 orig_addr = XEXP (orig_mem, 0);
38512 plus_or_lo_sum = GET_CODE (orig_addr);
38513 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38515 offset = XEXP (orig_addr, 1);
38516 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38517 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38519 if (extend != UNKNOWN)
38520 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
38522 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38523 UNSPEC_FUSION_P9);
38525 set = gen_rtx_SET (target, new_mem);
38526 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38527 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38528 emit_insn (insn);
38530 return;
38533 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38534 store sequence.
38536 The operands are:
38537 operands[0] register set with addis
38538 operands[1] value set via addis
38539 operands[2] target D-form memory being stored to
38540 operands[3] register being stored
38542 This is similar to the fusion introduced with power8, except it scales to
38543 both loads/stores and does not require the result register to be the same as
38544 the base register. At the moment, we only do this if the register set
38545 with addis is dead. */
38547 void
38548 expand_fusion_p9_store (rtx *operands)
38550 rtx tmp_reg = operands[0];
38551 rtx addis_value = operands[1];
38552 rtx orig_mem = operands[2];
38553 rtx src = operands[3];
38554 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
38555 enum rtx_code plus_or_lo_sum;
38556 machine_mode target_mode = GET_MODE (orig_mem);
38557 machine_mode ptr_mode = Pmode;
38559 gcc_assert (MEM_P (orig_mem));
38561 orig_addr = XEXP (orig_mem, 0);
38562 plus_or_lo_sum = GET_CODE (orig_addr);
38563 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38565 offset = XEXP (orig_addr, 1);
38566 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38567 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38569 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
38570 UNSPEC_FUSION_P9);
38572 set = gen_rtx_SET (new_mem, new_src);
38573 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38574 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38575 emit_insn (insn);
38577 return;
38580 /* Return a string to fuse an addis instruction with a load using extended
38581 fusion. The address that is used is the logical address that was formed
38582 during peephole2: (lo_sum (high) (low-part))
38584 The code is complicated, so we call output_asm_insn directly, and just
38585 return "". */
38587 const char *
38588 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
38590 machine_mode mode = GET_MODE (reg);
38591 rtx hi;
38592 rtx lo;
38593 rtx addr;
38594 const char *load_string;
38595 int r;
38597 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
38599 mem = XEXP (mem, 0);
38600 mode = GET_MODE (mem);
38603 if (GET_CODE (reg) == SUBREG)
38605 gcc_assert (SUBREG_BYTE (reg) == 0);
38606 reg = SUBREG_REG (reg);
38609 if (!REG_P (reg))
38610 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
38612 r = REGNO (reg);
38613 if (FP_REGNO_P (r))
38615 if (mode == SFmode)
38616 load_string = "lfs";
38617 else if (mode == DFmode || mode == DImode)
38618 load_string = "lfd";
38619 else
38620 gcc_unreachable ();
38622 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
38624 if (mode == SFmode)
38625 load_string = "lxssp";
38626 else if (mode == DFmode || mode == DImode)
38627 load_string = "lxsd";
38628 else
38629 gcc_unreachable ();
38631 else if (INT_REGNO_P (r))
38633 switch (mode)
38635 case E_QImode:
38636 load_string = "lbz";
38637 break;
38638 case E_HImode:
38639 load_string = "lhz";
38640 break;
38641 case E_SImode:
38642 case E_SFmode:
38643 load_string = "lwz";
38644 break;
38645 case E_DImode:
38646 case E_DFmode:
38647 if (!TARGET_POWERPC64)
38648 gcc_unreachable ();
38649 load_string = "ld";
38650 break;
38651 default:
38652 gcc_unreachable ();
38655 else
38656 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
38658 if (!MEM_P (mem))
38659 fatal_insn ("emit_fusion_p9_load not MEM", mem);
38661 addr = XEXP (mem, 0);
38662 fusion_split_address (addr, &hi, &lo);
38664 /* Emit the addis instruction. */
38665 emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));
38667 /* Emit the D-form load instruction. */
38668 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
38670 return "";
38673 /* Return a string to fuse an addis instruction with a store using extended
38674 fusion. The address that is used is the logical address that was formed
38675 during peephole2: (lo_sum (high) (low-part))
38677 The code is complicated, so we call output_asm_insn directly, and just
38678 return "". */

const char *
emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *store_string;
  int r;

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
	store_string = "stfs";
      else if (mode == DFmode)
	store_string = "stfd";
      else
	gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
	store_string = "stxssp";
      else if (mode == DFmode || mode == DImode)
	store_string = "stxsd";
      else
	gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
	{
	case E_QImode:
	  store_string = "stb";
	  break;
	case E_HImode:
	  store_string = "sth";
	  break;
	case E_SImode:
	case E_SFmode:
	  store_string = "stw";
	  break;
	case E_DImode:
	case E_DFmode:
	  if (!TARGET_POWERPC64)
	    gcc_unreachable ();
	  store_string = "std";
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_store not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));

  /* Emit the D-form store instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, store_string);

  return "";
}
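
/* The store side mirrors the load above; e.g. an SImode GPR store would
   print as the pair (again a sketch, not literal output):

     addis tmp,base,high16
     stw reg,low16(tmp)  */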

#ifdef RS6000_GLIBC_ATOMIC_FENV
/* Function declarations for rs6000_atomic_assign_expand_fenv.  */
static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
#endif

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_HARD_FLOAT)
    {
#ifdef RS6000_GLIBC_ATOMIC_FENV
      if (atomic_hold_decl == NULL_TREE)
	{
	  atomic_hold_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feholdexcept"),
			  build_function_type_list (void_type_node,
						    double_ptr_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_hold_decl) = 1;
	  DECL_EXTERNAL (atomic_hold_decl) = 1;
	}

      if (atomic_clear_decl == NULL_TREE)
	{
	  atomic_clear_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feclearexcept"),
			  build_function_type_list (void_type_node,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_clear_decl) = 1;
	  DECL_EXTERNAL (atomic_clear_decl) = 1;
	}

      tree const_double = build_qualified_type (double_type_node,
						TYPE_QUAL_CONST);
      tree const_double_ptr = build_pointer_type (const_double);
      if (atomic_update_decl == NULL_TREE)
	{
	  atomic_update_decl
	    = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			  get_identifier ("__atomic_feupdateenv"),
			  build_function_type_list (void_type_node,
						    const_double_ptr,
						    NULL_TREE));
	  TREE_PUBLIC (atomic_update_decl) = 1;
	  DECL_EXTERNAL (atomic_update_decl) = 1;
	}

      tree fenv_var = create_tmp_var_raw (double_type_node);
      TREE_ADDRESSABLE (fenv_var) = 1;
      tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);

      *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
      *clear = build_call_expr (atomic_clear_decl, 0);
      *update = build_call_expr (atomic_update_decl, 1,
				 fold_convert (const_double_ptr, fenv_addr));
#endif
      return;
    }

  tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
  tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
  tree call_mffs = build_call_expr (mffs, 0);

  /* Generates the equivalent of feholdexcept (&fenv_var)

     *fenv_var = __builtin_mffs ();
     double fenv_hold;
     *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
     __builtin_mtfsf (0xff, fenv_hold);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT hold_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000007);
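
  /* In the 64-bit image that mffs returns, the two low-order bits of the
     FPSCR are the rounding mode (RN) and bit 2 is the non-IEEE mode (NI)
     bit, so the 0x7 in the low word of the mask preserves exactly those
     fields.  */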

  tree fenv_var = create_tmp_var_raw (double_type_node);

  tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);

  tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
  tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			      build_int_cst (uint64_type_node,
					     hold_exception_mask));

  tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				 fenv_llu_and);

  tree hold_mtfsf = build_call_expr (mtfsf, 2,
				     build_int_cst (unsigned_type_node, 0xff),
				     fenv_hold_mtfsf);

  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

     double fenv_clear = __builtin_mffs ();
     *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
     __builtin_mtfsf (0xff, fenv_clear);  */

  /* Mask that clears the entire low (FPSCR) word, including all exception
     flags and the rounding modes.  */
  const unsigned HOST_WIDE_INT clear_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000000);

  tree fenv_clear = create_tmp_var_raw (double_type_node);

  tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);

  tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
  tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
				    fenv_clean_llu,
				    build_int_cst (uint64_type_node,
						   clear_exception_mask));

  tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				  fenv_clear_llu_and);

  tree clear_mtfsf = build_call_expr (mtfsf, 2,
				      build_int_cst (unsigned_type_node, 0xff),
				      fenv_clear_mtfsf);

  *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);

  /* Generates the equivalent of feupdateenv (&fenv_var)

     double old_fenv = __builtin_mffs ();
     double fenv_update;
     *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
				(*(uint64_t*)fenv_var & 0x1ff80fff);
     __builtin_mtfsf (0xff, fenv_update);  */

  const unsigned HOST_WIDE_INT update_exception_mask =
    HOST_WIDE_INT_C (0xffffffff1fffff00);
  const unsigned HOST_WIDE_INT new_exception_mask =
    HOST_WIDE_INT_C (0x1ff80fff);

  tree old_fenv = create_tmp_var_raw (double_type_node);
  tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);

  tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
  tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
			     build_int_cst (uint64_type_node,
					    update_exception_mask));

  tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
			     build_int_cst (uint64_type_node,
					    new_exception_mask));

  tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
			      old_llu_and, new_llu_and);

  tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
				   new_llu_mask);

  tree update_mtfsf = build_call_expr (mtfsf, 2,
				       build_int_cst (unsigned_type_node, 0xff),
				       fenv_update_mtfsf);

  *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
}
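
/* The HOLD/CLEAR/UPDATE trees built above implement the C11 semantics for
   atomic compound assignment on floating-point operands: HOLD is emitted
   before the compare-and-exchange loop, CLEAR on each failed iteration, and
   UPDATE once the exchange succeeds.  */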

void
rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DImode);
  rtx_tmp1 = gen_reg_rtx (V2DImode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Set up rtx_tmp0 and rtx_tmp1 so that the order of the elements after
     the vmrgew instruction is correct.  */
  if (VECTOR_ELT_ORDER_BIG)
    {
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
    }

  if (VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
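
/* A sketch of the resulting instruction sequence for the signed, big-endian
   case (register numbers illustrative only):

     xxpermdi tmp0,src1,src2,0
     xxpermdi tmp1,src1,src2,3
     xvcvsxdsp tmp2,tmp0
     xvcvsxdsp tmp3,tmp1
     vmrgew dst,tmp2,tmp3  */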

void
rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
			       rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));

  rtx_tmp2 = gen_reg_rtx (V4SImode);
  rtx_tmp3 = gen_reg_rtx (V4SImode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
    }

  emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
}
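
/* A hypothetical caller (a sketch; the real expanders live in the machine
   description):

     rs6000_generate_vsigned2_code (true,	// signed_convert
				    operands[0], operands[1], operands[2]);  */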

/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */

static bool
rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
			  optimization_type opt_type)
{
  switch (op)
    {
    case rsqrt_optab:
      return (opt_type == OPTIMIZE_FOR_SPEED
	      && RS6000_RECIP_AUTO_RSQRTE_P (mode1));

    default:
      return true;
    }
}
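
/* With the hook above, a query for the rsqrt optab (e.g. from internal
   function expansion) succeeds only when optimizing for speed and when the
   -mrecip settings enable the reciprocal square root estimate for the mode;
   every other optab remains unconditionally supported.  */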

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"