/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;         /* stack info won't change from here on */
  int first_gp_reg_save;        /* first callee saved GP register used */
  int first_fp_reg_save;        /* first callee saved FP register used */
  int first_altivec_reg_save;   /* first callee saved AltiVec register used */
  int lr_save_p;                /* true if the link reg needs to be saved */
  int cr_save_p;                /* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;     /* mask of vec registers to save */
  int push_p;                   /* true if we need to allocate stack space */
  int calls_p;                  /* true if the function makes any calls */
  int world_save_p;             /* true if we're saving *everything*:
                                   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;          /* which ABI to use */
  int gp_save_offset;           /* offset to save GP regs from initial SP */
  int fp_save_offset;           /* offset to save FP regs from initial SP */
  int altivec_save_offset;      /* offset to save AltiVec regs from initial SP */
  int lr_save_offset;           /* offset to save LR from initial SP */
  int cr_save_offset;           /* offset to save CR from initial SP */
  int vrsave_save_offset;       /* offset to save VRSAVE from initial SP */
  int varargs_save_offset;      /* offset to save the varargs registers */
  int ehrd_offset;              /* offset to EH return data */
  int ehcr_offset;              /* offset to EH CR field data */
  int reg_size;                 /* register size (4 or 8) */
  HOST_WIDE_INT vars_size;      /* variable save area size */
  int parm_size;                /* outgoing parameter size */
  int save_size;                /* save area size */
  int fixed_size;               /* fixed size of stack frame */
  int gp_size;                  /* size of saved GP registers */
  int fp_size;                  /* size of saved FP registers */
  int altivec_size;             /* size of saved AltiVec registers */
  int cr_size;                  /* size to hold CR if not in fixed area */
  int vrsave_size;              /* size to hold VRSAVE */
  int altivec_padding_size;     /* size of altivec alignment padding */
  HOST_WIDE_INT total_size;     /* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif
/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
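
/* Worked example (illustrative): with the values above, RECIP_ALL is 0xff,
   so RECIP_LOW_PRECISION is 0xff & ~(0x020 | 0x080) == 0x5f, i.e. every
   estimate except the DFmode/V2DFmode reciprocal square roots.  */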
/* -mrecip options.  */
static struct
{
  const char *string;	/* option name */
  unsigned int mask;	/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
/* On PowerPC, we have a limited number of target clones that we care about,
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,		/* default clone.  */
  CLONE_ISA_2_05,		/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,		/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,		/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,		/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
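
/* Sketch of the run-time dispatch this map drives (illustrative): a clone
   resolver tests the HWCAP names from the highest ISA down to the default,
   roughly

     if (__builtin_cpu_supports ("arch_3_00"))       pick the power9 clone;
     else if (__builtin_cpu_supports ("arch_2_07"))  pick the power8 clone;
     ...
     else                                            pick the default clone.  */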
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
/* Register classes we care about in secondary reload or when checking whether
   an address is legitimate.  We only need to worry about GPR, FPR, and
   Altivec registers here, along with an ANY field that is the OR of the 3
   register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,	/* General purpose registers.  */
  RELOAD_REG_FPR,	/* Traditional floating point regs.  */
  RELOAD_REG_VMX,	/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,	/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX
/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;	/* Register class name.  */
  int reg;		/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
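
/* Worked example (illustrative): a mode that is valid in a register class
   and allows both reg+reg and reg+offset addressing would carry
   (RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET) == 0x0d
   in the corresponding addr_mask entry.  */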
/* Valid address masks, plus the reload and fusion insns, for each register
   type, indexed by mode.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
					/* INSNs for fusing addi with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
					/* INSNs for fusing addis with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
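
/* For reference (illustrative): PRE_INC/PRE_DEC correspond to addresses
   such as (mem (pre_inc (reg))), where the register is adjusted implicitly
   by the mode size, while PRE_MODIFY carries the new address explicitly,
   e.g. (mem (pre_modify (reg) (plus (reg) (const_int 16)))).  */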
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

bool
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
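
/* For reference (an assumption about the underlying ISA 3.0 encoding): the
   quad-offset forms correspond to the DQ-form lxv/stxv instructions, whose
   displacement field is scaled by 16, hence the 16-byte multiple
   requirement; an offset of 48 is representable, an offset of 40 is not.  */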
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
  const int sfdf_convert;	/* cost of SF->DF conversion.  */
};

const struct processor_costs *rs6000_cost;
/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
  0,			    /* SF->DF convert */
};
/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};
/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void htm_init_builtins (void);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  machine_mode);
static bool rs6000_cannot_change_mode_class (machine_mode,
					     machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (machine_mode,
						   machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (machine_mode,
					     machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "ca",
  /* AltiVec registers.  */
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
   "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
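
/* Worked example (illustrative): ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO)
   is 0x80000000 (the bit for %v0), ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 1)
   is 0x40000000 (the bit for %v1), and so on down to %v31.  */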
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1704 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1705 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1706 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1707 rs6000_builtin_vectorization_cost
1708 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1709 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1710 rs6000_preferred_simd_mode
1711 #undef TARGET_VECTORIZE_INIT_COST
1712 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1713 #undef TARGET_VECTORIZE_ADD_STMT_COST
1714 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1715 #undef TARGET_VECTORIZE_FINISH_COST
1716 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1717 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1718 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1720 #undef TARGET_INIT_BUILTINS
1721 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1722 #undef TARGET_BUILTIN_DECL
1723 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1725 #undef TARGET_FOLD_BUILTIN
1726 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1727 #undef TARGET_GIMPLE_FOLD_BUILTIN
1728 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1730 #undef TARGET_EXPAND_BUILTIN
1731 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1733 #undef TARGET_MANGLE_TYPE
1734 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1736 #undef TARGET_INIT_LIBFUNCS
1737 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1739 #if TARGET_MACHO
1740 #undef TARGET_BINDS_LOCAL_P
1741 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1742 #endif
1744 #undef TARGET_MS_BITFIELD_LAYOUT_P
1745 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1747 #undef TARGET_ASM_OUTPUT_MI_THUNK
1748 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1750 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1751 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1753 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1754 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1756 #undef TARGET_REGISTER_MOVE_COST
1757 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1758 #undef TARGET_MEMORY_MOVE_COST
1759 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1760 #undef TARGET_CANNOT_COPY_INSN_P
1761 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1762 #undef TARGET_RTX_COSTS
1763 #define TARGET_RTX_COSTS rs6000_rtx_costs
1764 #undef TARGET_ADDRESS_COST
1765 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1767 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1768 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1770 #undef TARGET_PROMOTE_FUNCTION_MODE
1771 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1773 #undef TARGET_RETURN_IN_MEMORY
1774 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1776 #undef TARGET_RETURN_IN_MSB
1777 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1779 #undef TARGET_SETUP_INCOMING_VARARGS
1780 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1782 /* Always strict argument naming on rs6000. */
1783 #undef TARGET_STRICT_ARGUMENT_NAMING
1784 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1785 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1786 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1787 #undef TARGET_SPLIT_COMPLEX_ARG
1788 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1789 #undef TARGET_MUST_PASS_IN_STACK
1790 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1791 #undef TARGET_PASS_BY_REFERENCE
1792 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1793 #undef TARGET_ARG_PARTIAL_BYTES
1794 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1795 #undef TARGET_FUNCTION_ARG_ADVANCE
1796 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1797 #undef TARGET_FUNCTION_ARG
1798 #define TARGET_FUNCTION_ARG rs6000_function_arg
1799 #undef TARGET_FUNCTION_ARG_PADDING
1800 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1801 #undef TARGET_FUNCTION_ARG_BOUNDARY
1802 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1804 #undef TARGET_BUILD_BUILTIN_VA_LIST
1805 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1807 #undef TARGET_EXPAND_BUILTIN_VA_START
1808 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1810 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1811 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1813 #undef TARGET_EH_RETURN_FILTER_MODE
1814 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1816 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1817 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1819 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1820 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1822 #undef TARGET_FLOATN_MODE
1823 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1825 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1826 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1828 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1829 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1831 #undef TARGET_MD_ASM_ADJUST
1832 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1834 #undef TARGET_OPTION_OVERRIDE
1835 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1837 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1838 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1839 rs6000_builtin_vectorized_function
1841 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1842 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1843 rs6000_builtin_md_vectorized_function
1845 #undef TARGET_STACK_PROTECT_GUARD
1846 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1848 #if !TARGET_MACHO
1849 #undef TARGET_STACK_PROTECT_FAIL
1850 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1851 #endif
1853 #ifdef HAVE_AS_TLS
1854 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1855 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1856 #endif
1858 /* Use a 32-bit anchor range. This leads to sequences like:
1860 addis tmp,anchor,high
1861 add dest,tmp,low
1863 where tmp itself acts as an anchor, and can be shared between
1864 accesses to the same 64k page. */
1865 #undef TARGET_MIN_ANCHOR_OFFSET
1866 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1867 #undef TARGET_MAX_ANCHOR_OFFSET
1868 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
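/* Illustration (editor's example): for an access at anchor+0x12345678,
   the high part 0x1234 goes into the addis and the low part 0x5678 into
   the dependent add or load/store; any later access to the same 64k
   page can reuse the addis result.  The exact split adjusts for a
   negative low part.  */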
1869 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1870 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1871 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1872 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1874 #undef TARGET_BUILTIN_RECIPROCAL
1875 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1877 #undef TARGET_SECONDARY_RELOAD
1878 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1879 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1880 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1882 #undef TARGET_LEGITIMATE_ADDRESS_P
1883 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1885 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1886 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1888 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1889 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1891 #undef TARGET_CAN_ELIMINATE
1892 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1894 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1895 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1897 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1898 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1900 #undef TARGET_TRAMPOLINE_INIT
1901 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1903 #undef TARGET_FUNCTION_VALUE
1904 #define TARGET_FUNCTION_VALUE rs6000_function_value
1906 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1907 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1909 #undef TARGET_OPTION_SAVE
1910 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1912 #undef TARGET_OPTION_RESTORE
1913 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1915 #undef TARGET_OPTION_PRINT
1916 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1918 #undef TARGET_CAN_INLINE_P
1919 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1921 #undef TARGET_SET_CURRENT_FUNCTION
1922 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1924 #undef TARGET_LEGITIMATE_CONSTANT_P
1925 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1927 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1928 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1930 #undef TARGET_CAN_USE_DOLOOP_P
1931 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1933 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1934 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1936 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1937 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1938 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1939 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1940 #undef TARGET_UNWIND_WORD_MODE
1941 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1943 #undef TARGET_OFFLOAD_OPTIONS
1944 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1946 #undef TARGET_C_MODE_FOR_SUFFIX
1947 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1949 #undef TARGET_INVALID_BINARY_OP
1950 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1952 #undef TARGET_OPTAB_SUPPORTED_P
1953 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1955 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1956 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1958 #undef TARGET_COMPARE_VERSION_PRIORITY
1959 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1961 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1962 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1963 rs6000_generate_version_dispatcher_body
1965 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1966 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1967 rs6000_get_function_versions_dispatcher
1969 #undef TARGET_OPTION_FUNCTION_VERSIONS
1970 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1972 #undef TARGET_HARD_REGNO_NREGS
1973 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1974 #undef TARGET_HARD_REGNO_MODE_OK
1975 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1977 #undef TARGET_MODES_TIEABLE_P
1978 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1980 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1981 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1982 rs6000_hard_regno_call_part_clobbered
1984 #undef TARGET_SLOW_UNALIGNED_ACCESS
1985 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1988 /* Processor table. */
1989 struct rs6000_ptt
1991 const char *const name; /* Canonical processor name. */
1992 const enum processor_type processor; /* Processor type enum value. */
1993 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1996 static struct rs6000_ptt const processor_target_table[] =
1998 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1999 #include "rs6000-cpus.def"
2000 #undef RS6000_CPU
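/* Each RS6000_CPU line in rs6000-cpus.def expands to one initializer of
   the table above; abridged illustration:

     RS6000_CPU ("power8", PROCESSOR_POWER8,
                 MASK_POWERPC64 | ISA_2_7_MASKS_SERVER)

   becomes { "power8", PROCESSOR_POWER8, <flags> } in the table.  */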
2003 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2004 name is invalid. */
2006 static int
2007 rs6000_cpu_name_lookup (const char *name)
2009 size_t i;
2011 if (name != NULL)
2013 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2014 if (! strcmp (name, processor_target_table[i].name))
2015 return (int)i;
2018 return -1;
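/* Hypothetical usage sketch (editor's example):

     int idx = rs6000_cpu_name_lookup ("power8");
     if (idx >= 0)
       flags = processor_target_table[idx].target_enable;

   A NULL pointer or an unrecognized name yields -1.  */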
2022 /* Return number of consecutive hard regs needed starting at reg REGNO
2023 to hold something of mode MODE.
2024 This is ordinarily the length in words of a value of mode MODE
2025 but can be less for certain modes in special long registers.
2027 POWER and PowerPC GPRs hold 32 bits worth;
2028 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2030 static int
2031 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2033 unsigned HOST_WIDE_INT reg_size;
2035 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2036 128-bit floating point that can go in vector registers, which has VSX
2037 memory addressing. */
2038 if (FP_REGNO_P (regno))
2039 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2040 ? UNITS_PER_VSX_WORD
2041 : UNITS_PER_FP_WORD);
2043 else if (ALTIVEC_REGNO_P (regno))
2044 reg_size = UNITS_PER_ALTIVEC_WORD;
2046 else
2047 reg_size = UNITS_PER_WORD;
2049 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
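/* Worked example (illustrative): DFmode is 8 bytes, so a 32-bit GPR
   (reg_size = 4) needs (8 + 4 - 1) / 4 = 2 consecutive registers to
   hold it, while an FPR (reg_size = 8) needs only 1.  */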
2052 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2053 MODE. */
2054 static int
2055 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2057 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2059 if (COMPLEX_MODE_P (mode))
2060 mode = GET_MODE_INNER (mode);
2062 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2063 register pairs, and PTImode is what we use to represent them. Don't allow
2064 quad words in the argument or frame
2065 pointer registers, just registers 0..31. */
2066 if (mode == PTImode)
2067 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2068 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2069 && ((regno & 1) == 0));
2071 /* VSX registers that overlap the FPR registers are wider than on non-VSX
2072 implementations. Don't allow an item to be split between an FP register
2073 and an Altivec register. Allow TImode in all VSX registers if the user
2074 asked for it. */
2075 if (TARGET_VSX && VSX_REGNO_P (regno)
2076 && (VECTOR_MEM_VSX_P (mode)
2077 || FLOAT128_VECTOR_P (mode)
2078 || reg_addr[mode].scalar_in_vmx_p
2079 || mode == TImode
2080 || (TARGET_VADDUQM && mode == V1TImode)))
2082 if (FP_REGNO_P (regno))
2083 return FP_REGNO_P (last_regno);
2085 if (ALTIVEC_REGNO_P (regno))
2087 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2088 return 0;
2090 return ALTIVEC_REGNO_P (last_regno);
2094 /* The GPRs can hold any mode, but values bigger than one register
2095 cannot go past R31. */
2096 if (INT_REGNO_P (regno))
2097 return INT_REGNO_P (last_regno);
2099 /* The float registers (except for VSX vector modes) can only hold floating
2100 modes and DImode. */
2101 if (FP_REGNO_P (regno))
2103 if (FLOAT128_VECTOR_P (mode))
2104 return false;
2106 if (SCALAR_FLOAT_MODE_P (mode)
2107 && (mode != TDmode || (regno % 2) == 0)
2108 && FP_REGNO_P (last_regno))
2109 return 1;
2111 if (GET_MODE_CLASS (mode) == MODE_INT)
2113 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2114 return 1;
2116 if (TARGET_P8_VECTOR && (mode == SImode))
2117 return 1;
2119 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2120 return 1;
2123 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
2124 && PAIRED_VECTOR_MODE (mode))
2125 return 1;
2127 return 0;
2130 /* The CR register can only hold CC modes. */
2131 if (CR_REGNO_P (regno))
2132 return GET_MODE_CLASS (mode) == MODE_CC;
2134 if (CA_REGNO_P (regno))
2135 return mode == Pmode || mode == SImode;
2137 /* AltiVec modes only in AltiVec registers. */
2138 if (ALTIVEC_REGNO_P (regno))
2139 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2140 || mode == V1TImode);
2142 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2143 registers, and it must be able to fit within the register set. */
2145 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
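/* Examples of the rules above (illustrative): PTImode is accepted only
   at even GPR numbers, TDmode in FPRs likewise requires an even
   register number, and the CR fields accept nothing but MODE_CC
   modes.  */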
2148 /* Implement TARGET_HARD_REGNO_NREGS. */
2150 static unsigned int
2151 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2153 return rs6000_hard_regno_nregs[mode][regno];
2156 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2158 static bool
2159 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2161 return rs6000_hard_regno_mode_ok_p[mode][regno];
2164 /* Implement TARGET_MODES_TIEABLE_P.
2166 PTImode cannot tie with other modes because PTImode is restricted to even
2167 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2168 57744).
2170 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2171 128-bit floating point on VSX systems ties with other vectors. */
2173 static bool
2174 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2176 if (mode1 == PTImode)
2177 return mode2 == PTImode;
2178 if (mode2 == PTImode)
2179 return false;
2181 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2182 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2183 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2184 return false;
2186 if (SCALAR_FLOAT_MODE_P (mode1))
2187 return SCALAR_FLOAT_MODE_P (mode2);
2188 if (SCALAR_FLOAT_MODE_P (mode2))
2189 return false;
2191 if (GET_MODE_CLASS (mode1) == MODE_CC)
2192 return GET_MODE_CLASS (mode2) == MODE_CC;
2193 if (GET_MODE_CLASS (mode2) == MODE_CC)
2194 return false;
2196 if (PAIRED_VECTOR_MODE (mode1))
2197 return PAIRED_VECTOR_MODE (mode2);
2198 if (PAIRED_VECTOR_MODE (mode2))
2199 return false;
2201 return true;
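/* Illustration: V2DFmode ties with V4SFmode (both are Altivec/VSX
   vector modes) and DFmode ties with SFmode (both scalar float), but
   TImode never ties with PTImode because of the even-GPR
   restriction.  */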
2204 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2206 static bool
2207 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2209 if (TARGET_32BIT
2210 && TARGET_POWERPC64
2211 && GET_MODE_SIZE (mode) > 4
2212 && INT_REGNO_P (regno))
2213 return true;
2215 if (TARGET_VSX
2216 && FP_REGNO_P (regno)
2217 && GET_MODE_SIZE (mode) > 8
2218 && !FLOAT128_2REG_P (mode))
2219 return true;
2221 return false;
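/* E.g. with -m32 -mpowerpc64 (illustrative), a DImode value occupies a
   single 64-bit GPR, but the 32-bit ABI only preserves the low 32 bits
   across calls, so the register is reported as partially clobbered.  */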
2224 /* Print interesting facts about registers. */
2225 static void
2226 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2228 int r, m;
2230 for (r = first_regno; r <= last_regno; ++r)
2232 const char *comma = "";
2233 int len;
2235 if (first_regno == last_regno)
2236 fprintf (stderr, "%s:\t", reg_name);
2237 else
2238 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2240 len = 8;
2241 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2242 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2244 if (len > 70)
2246 fprintf (stderr, ",\n\t");
2247 len = 8;
2248 comma = "";
2251 if (rs6000_hard_regno_nregs[m][r] > 1)
2252 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2253 rs6000_hard_regno_nregs[m][r]);
2254 else
2255 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2257 comma = ", ";
2260 if (call_used_regs[r])
2262 if (len > 70)
2264 fprintf (stderr, ",\n\t");
2265 len = 8;
2266 comma = "";
2269 len += fprintf (stderr, "%s%s", comma, "call-used");
2270 comma = ", ";
2273 if (fixed_regs[r])
2275 if (len > 70)
2277 fprintf (stderr, ",\n\t");
2278 len = 8;
2279 comma = "";
2282 len += fprintf (stderr, "%s%s", comma, "fixed");
2283 comma = ", ";
2286 if (len > 70)
2288 fprintf (stderr, ",\n\t");
2289 comma = "";
2292 len += fprintf (stderr, "%sreg-class = %s", comma,
2293 reg_class_names[(int)rs6000_regno_regclass[r]]);
2294 comma = ", ";
2296 if (len > 70)
2298 fprintf (stderr, ",\n\t");
2299 comma = "";
2302 fprintf (stderr, "%sregno = %d\n", comma, r);
2306 static const char *
2307 rs6000_debug_vector_unit (enum rs6000_vector v)
2309 const char *ret;
2311 switch (v)
2313 case VECTOR_NONE: ret = "none"; break;
2314 case VECTOR_ALTIVEC: ret = "altivec"; break;
2315 case VECTOR_VSX: ret = "vsx"; break;
2316 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2317 case VECTOR_PAIRED: ret = "paired"; break;
2318 case VECTOR_OTHER: ret = "other"; break;
2319 default: ret = "unknown"; break;
2322 return ret;
2325 /* Inner function printing just the address mask for a particular reload
2326 register class. */
2327 DEBUG_FUNCTION char *
2328 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2330 static char ret[8];
2331 char *p = ret;
2333 if ((mask & RELOAD_REG_VALID) != 0)
2334 *p++ = 'v';
2335 else if (keep_spaces)
2336 *p++ = ' ';
2338 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2339 *p++ = 'm';
2340 else if (keep_spaces)
2341 *p++ = ' ';
2343 if ((mask & RELOAD_REG_INDEXED) != 0)
2344 *p++ = 'i';
2345 else if (keep_spaces)
2346 *p++ = ' ';
2348 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2349 *p++ = 'O';
2350 else if ((mask & RELOAD_REG_OFFSET) != 0)
2351 *p++ = 'o';
2352 else if (keep_spaces)
2353 *p++ = ' ';
2355 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2356 *p++ = '+';
2357 else if (keep_spaces)
2358 *p++ = ' ';
2360 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2361 *p++ = '+';
2362 else if (keep_spaces)
2363 *p++ = ' ';
2365 if ((mask & RELOAD_REG_AND_M16) != 0)
2366 *p++ = '&';
2367 else if (keep_spaces)
2368 *p++ = ' ';
2370 *p = '\0';
2372 return ret;
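/* Sample output (illustrative): a mask of RELOAD_REG_VALID
   | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET prints as "v io" padded
   with blanks for the unset positions when KEEP_SPACES is true.  */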
2375 /* Print the address masks in a human readable fashion. */
2376 DEBUG_FUNCTION void
2377 rs6000_debug_print_mode (ssize_t m)
2379 ssize_t rc;
2380 int spaces = 0;
2381 bool fuse_extra_p;
2383 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2384 for (rc = 0; rc < N_RELOAD_REG; rc++)
2385 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2386 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2388 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2389 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2390 fprintf (stderr, " Reload=%c%c",
2391 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2392 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2393 else
2394 spaces += sizeof (" Reload=sl") - 1;
2396 if (reg_addr[m].scalar_in_vmx_p)
2398 fprintf (stderr, "%*s Upper=y", spaces, "");
2399 spaces = 0;
2401 else
2402 spaces += sizeof (" Upper=y") - 1;
2404 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2405 || reg_addr[m].fused_toc);
2406 if (!fuse_extra_p)
2408 for (rc = 0; rc < N_RELOAD_REG; rc++)
2410 if (rc != RELOAD_REG_ANY)
2412 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2414 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2415 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2416 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2418 fuse_extra_p = true;
2419 break;
2425 if (fuse_extra_p)
2427 fprintf (stderr, "%*s Fuse:", spaces, "");
2428 spaces = 0;
2430 for (rc = 0; rc < N_RELOAD_REG; rc++)
2432 if (rc != RELOAD_REG_ANY)
2434 char load, store;
2436 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2437 load = 'l';
2438 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2439 load = 'L';
2440 else
2441 load = '-';
2443 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2444 store = 's';
2445 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2446 store = 'S';
2447 else
2448 store = '-';
2450 if (load == '-' && store == '-')
2451 spaces += 5;
2452 else
2454 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2455 reload_reg_map[rc].name[0], load, store);
2456 spaces = 0;
2461 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2463 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2464 spaces = 0;
2466 else
2467 spaces += sizeof (" P8gpr") - 1;
2469 if (reg_addr[m].fused_toc)
2471 fprintf (stderr, "%*sToc", (spaces + 1), "");
2472 spaces = 0;
2474 else
2475 spaces += sizeof (" Toc") - 1;
2477 else
2478 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2480 if (rs6000_vector_unit[m] != VECTOR_NONE
2481 || rs6000_vector_mem[m] != VECTOR_NONE)
2483 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2484 spaces, "",
2485 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2486 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2489 fputs ("\n", stderr);
2492 #define DEBUG_FMT_ID "%-32s= "
2493 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2494 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2495 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
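/* Illustration: fprintf (stderr, DEBUG_FMT_S, "abi", "aix") prints the
   label left-justified in a 32-column field followed by "= aix", which
   keeps the -mdebug=reg listing aligned.  */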
2497 /* Print various interesting information with -mdebug=reg. */
2498 static void
2499 rs6000_debug_reg_global (void)
2501 static const char *const tf[2] = { "false", "true" };
2502 const char *nl = (const char *)0;
2503 int m;
2504 size_t m1, m2, v;
2505 char costly_num[20];
2506 char nop_num[20];
2507 char flags_buffer[40];
2508 const char *costly_str;
2509 const char *nop_str;
2510 const char *trace_str;
2511 const char *abi_str;
2512 const char *cmodel_str;
2513 struct cl_target_option cl_opts;
2515 /* Modes we want tieable information on. */
2516 static const machine_mode print_tieable_modes[] = {
2517 QImode,
2518 HImode,
2519 SImode,
2520 DImode,
2521 TImode,
2522 PTImode,
2523 SFmode,
2524 DFmode,
2525 TFmode,
2526 IFmode,
2527 KFmode,
2528 SDmode,
2529 DDmode,
2530 TDmode,
2531 V2SImode,
2532 V16QImode,
2533 V8HImode,
2534 V4SImode,
2535 V2DImode,
2536 V1TImode,
2537 V32QImode,
2538 V16HImode,
2539 V8SImode,
2540 V4DImode,
2541 V2TImode,
2542 V2SFmode,
2543 V4SFmode,
2544 V2DFmode,
2545 V8SFmode,
2546 V4DFmode,
2547 CCmode,
2548 CCUNSmode,
2549 CCEQmode,
2552 /* Virtual regs we are interested in. */
2553 static const struct {
2554 int regno; /* register number. */
2555 const char *name; /* register name. */
2556 } virtual_regs[] = {
2557 { STACK_POINTER_REGNUM, "stack pointer:" },
2558 { TOC_REGNUM, "toc: " },
2559 { STATIC_CHAIN_REGNUM, "static chain: " },
2560 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2561 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2562 { ARG_POINTER_REGNUM, "arg pointer: " },
2563 { FRAME_POINTER_REGNUM, "frame pointer:" },
2564 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2565 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2566 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2567 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2568 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2569 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2570 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2571 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2572 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2575 fputs ("\nHard register information:\n", stderr);
2576 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2577 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2578 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2579 LAST_ALTIVEC_REGNO,
2580 "vs");
2581 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2582 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2583 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2584 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2585 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2586 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2588 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2589 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2590 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2592 fprintf (stderr,
2593 "\n"
2594 "d reg_class = %s\n"
2595 "f reg_class = %s\n"
2596 "v reg_class = %s\n"
2597 "wa reg_class = %s\n"
2598 "wb reg_class = %s\n"
2599 "wd reg_class = %s\n"
2600 "we reg_class = %s\n"
2601 "wf reg_class = %s\n"
2602 "wg reg_class = %s\n"
2603 "wh reg_class = %s\n"
2604 "wi reg_class = %s\n"
2605 "wj reg_class = %s\n"
2606 "wk reg_class = %s\n"
2607 "wl reg_class = %s\n"
2608 "wm reg_class = %s\n"
2609 "wo reg_class = %s\n"
2610 "wp reg_class = %s\n"
2611 "wq reg_class = %s\n"
2612 "wr reg_class = %s\n"
2613 "ws reg_class = %s\n"
2614 "wt reg_class = %s\n"
2615 "wu reg_class = %s\n"
2616 "wv reg_class = %s\n"
2617 "ww reg_class = %s\n"
2618 "wx reg_class = %s\n"
2619 "wy reg_class = %s\n"
2620 "wz reg_class = %s\n"
2621 "wA reg_class = %s\n"
2622 "wH reg_class = %s\n"
2623 "wI reg_class = %s\n"
2624 "wJ reg_class = %s\n"
2625 "wK reg_class = %s\n"
2626 "\n",
2627 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2628 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2629 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2630 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2631 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2632 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2633 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2634 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2635 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2636 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2637 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2638 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2639 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2640 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2641 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2642 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2643 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2644 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2645 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2646 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2647 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2648 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2649 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2650 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2651 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2652 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2653 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2654 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2655 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2656 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2657 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2658 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2660 nl = "\n";
2661 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2662 rs6000_debug_print_mode (m);
2664 fputs ("\n", stderr);
2666 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2668 machine_mode mode1 = print_tieable_modes[m1];
2669 bool first_time = true;
2671 nl = (const char *)0;
2672 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2674 machine_mode mode2 = print_tieable_modes[m2];
2675 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2677 if (first_time)
2679 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2680 nl = "\n";
2681 first_time = false;
2684 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2688 if (!first_time)
2689 fputs ("\n", stderr);
2692 if (nl)
2693 fputs (nl, stderr);
2695 if (rs6000_recip_control)
2697 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2699 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2700 if (rs6000_recip_bits[m])
2702 fprintf (stderr,
2703 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2704 GET_MODE_NAME (m),
2705 (RS6000_RECIP_AUTO_RE_P (m)
2706 ? "auto"
2707 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2708 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2709 ? "auto"
2710 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2713 fputs ("\n", stderr);
2716 if (rs6000_cpu_index >= 0)
2718 const char *name = processor_target_table[rs6000_cpu_index].name;
2719 HOST_WIDE_INT flags
2720 = processor_target_table[rs6000_cpu_index].target_enable;
2722 sprintf (flags_buffer, "-mcpu=%s flags", name);
2723 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2725 else
2726 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2728 if (rs6000_tune_index >= 0)
2730 const char *name = processor_target_table[rs6000_tune_index].name;
2731 HOST_WIDE_INT flags
2732 = processor_target_table[rs6000_tune_index].target_enable;
2734 sprintf (flags_buffer, "-mtune=%s flags", name);
2735 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2737 else
2738 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2740 cl_target_option_save (&cl_opts, &global_options);
2741 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2742 rs6000_isa_flags);
2744 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2745 rs6000_isa_flags_explicit);
2747 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2748 rs6000_builtin_mask);
2750 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2752 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2753 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2755 switch (rs6000_sched_costly_dep)
2757 case max_dep_latency:
2758 costly_str = "max_dep_latency";
2759 break;
2761 case no_dep_costly:
2762 costly_str = "no_dep_costly";
2763 break;
2765 case all_deps_costly:
2766 costly_str = "all_deps_costly";
2767 break;
2769 case true_store_to_load_dep_costly:
2770 costly_str = "true_store_to_load_dep_costly";
2771 break;
2773 case store_to_load_dep_costly:
2774 costly_str = "store_to_load_dep_costly";
2775 break;
2777 default:
2778 costly_str = costly_num;
2779 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2780 break;
2783 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2785 switch (rs6000_sched_insert_nops)
2787 case sched_finish_regroup_exact:
2788 nop_str = "sched_finish_regroup_exact";
2789 break;
2791 case sched_finish_pad_groups:
2792 nop_str = "sched_finish_pad_groups";
2793 break;
2795 case sched_finish_none:
2796 nop_str = "sched_finish_none";
2797 break;
2799 default:
2800 nop_str = nop_num;
2801 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2802 break;
2805 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2807 switch (rs6000_sdata)
2809 default:
2810 case SDATA_NONE:
2811 break;
2813 case SDATA_DATA:
2814 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2815 break;
2817 case SDATA_SYSV:
2818 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2819 break;
2821 case SDATA_EABI:
2822 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2823 break;
2827 switch (rs6000_traceback)
2829 case traceback_default: trace_str = "default"; break;
2830 case traceback_none: trace_str = "none"; break;
2831 case traceback_part: trace_str = "part"; break;
2832 case traceback_full: trace_str = "full"; break;
2833 default: trace_str = "unknown"; break;
2836 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2838 switch (rs6000_current_cmodel)
2840 case CMODEL_SMALL: cmodel_str = "small"; break;
2841 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2842 case CMODEL_LARGE: cmodel_str = "large"; break;
2843 default: cmodel_str = "unknown"; break;
2846 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2848 switch (rs6000_current_abi)
2850 case ABI_NONE: abi_str = "none"; break;
2851 case ABI_AIX: abi_str = "aix"; break;
2852 case ABI_ELFv2: abi_str = "ELFv2"; break;
2853 case ABI_V4: abi_str = "V4"; break;
2854 case ABI_DARWIN: abi_str = "darwin"; break;
2855 default: abi_str = "unknown"; break;
2858 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2860 if (rs6000_altivec_abi)
2861 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2863 if (rs6000_darwin64_abi)
2864 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2866 fprintf (stderr, DEBUG_FMT_S, "single_float",
2867 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2869 fprintf (stderr, DEBUG_FMT_S, "double_float",
2870 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2872 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2873 (TARGET_SOFT_FLOAT ? "true" : "false"));
2875 if (TARGET_LINK_STACK)
2876 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2878 if (TARGET_P8_FUSION)
2880 char options[80];
2882 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2883 if (TARGET_TOC_FUSION)
2884 strcat (options, ", toc");
2886 if (TARGET_P8_FUSION_SIGN)
2887 strcat (options, ", sign");
2889 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2892 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2893 TARGET_SECURE_PLT ? "secure" : "bss");
2894 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2895 aix_struct_return ? "aix" : "sysv");
2896 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2897 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2898 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2899 tf[!!rs6000_align_branch_targets]);
2900 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2901 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2902 rs6000_long_double_type_size);
2903 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2904 (int)rs6000_sched_restricted_insns_priority);
2905 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2906 (int)END_BUILTINS);
2907 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2908 (int)RS6000_BUILTIN_COUNT);
2910 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2911 (int)TARGET_FLOAT128_ENABLE_TYPE);
2913 if (TARGET_VSX)
2914 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2915 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2917 if (TARGET_DIRECT_MOVE_128)
2918 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2919 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2923 /* Update the addr mask bits in reg_addr to help secondary reload and the
2924 legitimate address support figure out the appropriate addressing to
2925 use. */
2927 static void
2928 rs6000_setup_reg_addr_masks (void)
2930 ssize_t rc, reg, m, nregs;
2931 addr_mask_type any_addr_mask, addr_mask;
2933 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2935 machine_mode m2 = (machine_mode) m;
2936 bool complex_p = false;
2937 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2938 size_t msize;
2940 if (COMPLEX_MODE_P (m2))
2942 complex_p = true;
2943 m2 = GET_MODE_INNER (m2);
2946 msize = GET_MODE_SIZE (m2);
2948 /* SDmode is special in that we want to access it only via REG+REG
2949 addressing on power7 and above, since we want to use the LFIWZX and
2950 STFIWZX instructions to load it. */
2951 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2953 any_addr_mask = 0;
2954 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2956 addr_mask = 0;
2957 reg = reload_reg_map[rc].reg;
2959 /* Can mode values go in the GPR/FPR/Altivec registers? */
2960 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2962 bool small_int_vsx_p = (small_int_p
2963 && (rc == RELOAD_REG_FPR
2964 || rc == RELOAD_REG_VMX));
2966 nregs = rs6000_hard_regno_nregs[m][reg];
2967 addr_mask |= RELOAD_REG_VALID;
2969 /* Indicate if the mode takes more than 1 physical register. If
2970 it takes a single register, indicate it can do REG+REG
2971 addressing. Small integers in VSX registers can only do
2972 REG+REG addressing. */
2973 if (small_int_vsx_p)
2974 addr_mask |= RELOAD_REG_INDEXED;
2975 else if (nregs > 1 || m == BLKmode || complex_p)
2976 addr_mask |= RELOAD_REG_MULTIPLE;
2977 else
2978 addr_mask |= RELOAD_REG_INDEXED;
2980 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2981 addressing. If we allow scalars into Altivec registers,
2982 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2984 if (TARGET_UPDATE
2985 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2986 && msize <= 8
2987 && !VECTOR_MODE_P (m2)
2988 && !FLOAT128_VECTOR_P (m2)
2989 && !complex_p
2990 && !small_int_vsx_p)
2992 addr_mask |= RELOAD_REG_PRE_INCDEC;
2994 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2995 we don't allow PRE_MODIFY for some multi-register
2996 operations. */
2997 switch (m)
2999 default:
3000 addr_mask |= RELOAD_REG_PRE_MODIFY;
3001 break;
3003 case E_DImode:
3004 if (TARGET_POWERPC64)
3005 addr_mask |= RELOAD_REG_PRE_MODIFY;
3006 break;
3008 case E_DFmode:
3009 case E_DDmode:
3010 if (TARGET_DF_INSN)
3011 addr_mask |= RELOAD_REG_PRE_MODIFY;
3012 break;
3017 /* GPR and FPR registers can do REG+OFFSET addressing, except
3018 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
3019 for 64-bit scalars and 32-bit SFmode to altivec registers. */
3020 if ((addr_mask != 0) && !indexed_only_p
3021 && msize <= 8
3022 && (rc == RELOAD_REG_GPR
3023 || ((msize == 8 || m2 == SFmode)
3024 && (rc == RELOAD_REG_FPR
3025 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
3026 addr_mask |= RELOAD_REG_OFFSET;
3028 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
3029 instructions are enabled. The offset for 128-bit VSX registers is
3030 only 12-bits. While GPRs can handle the full offset range, VSX
3031 registers can only handle the restricted range. */
3032 else if ((addr_mask != 0) && !indexed_only_p
3033 && msize == 16 && TARGET_P9_VECTOR
3034 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
3035 || (m2 == TImode && TARGET_VSX)))
3037 addr_mask |= RELOAD_REG_OFFSET;
3038 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
3039 addr_mask |= RELOAD_REG_QUAD_OFFSET;
3042 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
3043 addressing on 128-bit types. */
3044 if (rc == RELOAD_REG_VMX && msize == 16
3045 && (addr_mask & RELOAD_REG_VALID) != 0)
3046 addr_mask |= RELOAD_REG_AND_M16;
3048 reg_addr[m].addr_mask[rc] = addr_mask;
3049 any_addr_mask |= addr_mask;
3052 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
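/* Illustration of the result (editor's sketch; the exact bits depend on
   the ISA flags in effect): for DFmode on a typical VSX target the
   RELOAD_REG_FPR entry ends up with RELOAD_REG_VALID
   | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET, plus the PRE_INCDEC and
   PRE_MODIFY bits when TARGET_UPDATE is on; the secondary reload code
   consults these masks to pick an addressing form.  */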
3057 /* Initialize the various global tables that are based on register size. */
3058 static void
3059 rs6000_init_hard_regno_mode_ok (bool global_init_p)
3061 ssize_t r, m, c;
3062 int align64;
3063 int align32;
3065 /* Precalculate REGNO_REG_CLASS. */
3066 rs6000_regno_regclass[0] = GENERAL_REGS;
3067 for (r = 1; r < 32; ++r)
3068 rs6000_regno_regclass[r] = BASE_REGS;
3070 for (r = 32; r < 64; ++r)
3071 rs6000_regno_regclass[r] = FLOAT_REGS;
3073 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
3074 rs6000_regno_regclass[r] = NO_REGS;
3076 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3077 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3079 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3080 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3081 rs6000_regno_regclass[r] = CR_REGS;
3083 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3084 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3085 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3086 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3087 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3088 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3089 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3090 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3091 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3092 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3094 /* Precalculate register class to simpler reload register class. We don't
3095 need all of the register classes that are combinations of different
3096 classes, just the simple ones that have constraint letters. */
3097 for (c = 0; c < N_REG_CLASSES; c++)
3098 reg_class_to_reg_type[c] = NO_REG_TYPE;
3100 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3101 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3102 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3103 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3104 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3105 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3106 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3107 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3108 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3109 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3111 if (TARGET_VSX)
3113 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3114 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3116 else
3118 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3119 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3122 /* Precalculate the valid memory formats as well as the vector information;
3123 this must be set up before the rs6000_hard_regno_nregs_internal calls
3124 below. */
3125 gcc_assert ((int)VECTOR_NONE == 0);
3126 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3127 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3129 gcc_assert ((int)CODE_FOR_nothing == 0);
3130 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3132 gcc_assert ((int)NO_REGS == 0);
3133 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3135 /* The VSX hardware allows native alignment for vectors; control whether the
3136 compiler believes it can use native alignment or must still use 128-bit alignment. */
3137 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3139 align64 = 64;
3140 align32 = 32;
3142 else
3144 align64 = 128;
3145 align32 = 128;
3148 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3149 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3150 if (TARGET_FLOAT128_TYPE)
3152 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3153 rs6000_vector_align[KFmode] = 128;
3155 if (FLOAT128_IEEE_P (TFmode))
3157 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3158 rs6000_vector_align[TFmode] = 128;
3162 /* V2DF mode, VSX only. */
3163 if (TARGET_VSX)
3165 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3166 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3167 rs6000_vector_align[V2DFmode] = align64;
3170 /* V4SF mode, either VSX or Altivec. */
3171 if (TARGET_VSX)
3173 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3174 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3175 rs6000_vector_align[V4SFmode] = align32;
3177 else if (TARGET_ALTIVEC)
3179 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3180 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3181 rs6000_vector_align[V4SFmode] = align32;
3184 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3185 and stores. */
3186 if (TARGET_ALTIVEC)
3188 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3189 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3190 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3191 rs6000_vector_align[V4SImode] = align32;
3192 rs6000_vector_align[V8HImode] = align32;
3193 rs6000_vector_align[V16QImode] = align32;
3195 if (TARGET_VSX)
3197 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3198 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3199 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3201 else
3203 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3204 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3205 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3209 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3210 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3211 if (TARGET_VSX)
3213 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3214 rs6000_vector_unit[V2DImode]
3215 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3216 rs6000_vector_align[V2DImode] = align64;
3218 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3219 rs6000_vector_unit[V1TImode]
3220 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3221 rs6000_vector_align[V1TImode] = 128;
3224 /* DFmode, see if we want to use the VSX unit. Memory is handled
3225 differently, so don't set rs6000_vector_mem. */
3226 if (TARGET_VSX)
3228 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3229 rs6000_vector_align[DFmode] = 64;
3232 /* SFmode, see if we want to use the VSX unit. */
3233 if (TARGET_P8_VECTOR)
3235 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3236 rs6000_vector_align[SFmode] = 32;
3239 /* Allow TImode in VSX register and set the VSX memory macros. */
3240 if (TARGET_VSX)
3242 rs6000_vector_mem[TImode] = VECTOR_VSX;
3243 rs6000_vector_align[TImode] = align64;
3246 /* TODO add paired floating point vector support. */
3248 /* Register class constraints for the constraints that depend on compile
3249 switches. When the VSX code was added, different constraints were added
3250 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3251 of the VSX registers are used. The register classes for scalar floating
3252 point types are set based on whether we allow that type into the upper
3253 (Altivec) registers. GCC has register classes to target the Altivec
3254 registers for load/store operations, to select using a VSX memory
3255 operation instead of the traditional floating point operation. The
3256 constraints are:
3258 d - Register class to use with traditional DFmode instructions.
3259 f - Register class to use with traditional SFmode instructions.
3260 v - Altivec register.
3261 wa - Any VSX register.
3262 wc - Reserved to represent individual CR bits (used in LLVM).
3263 wd - Preferred register class for V2DFmode.
3264 wf - Preferred register class for V4SFmode.
3265 wg - Float register for power6x move insns.
3266 wh - FP register for direct move instructions.
3267 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3268 wj - FP or VSX register to hold 64-bit integers for direct moves.
3269 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3270 wl - Float register if we can do 32-bit signed int loads.
3271 wm - VSX register for ISA 2.07 direct move operations.
3272 wn - always NO_REGS.
3273 wr - GPR if 64-bit mode is permitted.
3274 ws - Register class to do ISA 2.06 DF operations.
3275 wt - VSX register for TImode in VSX registers.
3276 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3277 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3278 ww - Register class to do SF conversions in with VSX operations.
3279 wx - Float register if we can do 32-bit int stores.
3280 wy - Register class to do ISA 2.07 SF operations.
3281 wz - Float register if we can do 32-bit unsigned int loads.
3282 wH - Altivec register if SImode is allowed in VSX registers.
3283 wI - VSX register if SImode is allowed in VSX registers.
3284 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3285 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
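/* For illustration: an operand constrained with "wa" accepts any VSX
   register once TARGET_VSX sets the entry to VSX_REGS below, while "d"
   stays limited to the traditional FPRs; a constraint left at NO_REGS
   simply never matches.  */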
3287 if (TARGET_HARD_FLOAT)
3288 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3290 if (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
3291 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3293 if (TARGET_VSX)
3295 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3296 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3297 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3298 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3299 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3300 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3301 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3304 /* Add conditional constraints based on various options, to allow us to
3305 collapse multiple insn patterns. */
3306 if (TARGET_ALTIVEC)
3307 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3309 if (TARGET_MFPGPR) /* DFmode */
3310 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3312 if (TARGET_LFIWAX)
3313 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3315 if (TARGET_DIRECT_MOVE)
3317 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3318 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3319 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3320 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3321 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3322 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3325 if (TARGET_POWERPC64)
3327 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3328 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3331 if (TARGET_P8_VECTOR) /* SFmode */
3333 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3334 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3335 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3337 else if (TARGET_VSX)
3338 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3340 if (TARGET_STFIWX)
3341 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3343 if (TARGET_LFIWZX)
3344 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3346 if (TARGET_FLOAT128_TYPE)
3348 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3349 if (FLOAT128_IEEE_P (TFmode))
3350 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3353 if (TARGET_P9_VECTOR)
3355 /* Support for new D-form instructions. */
3356 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3358 /* Support for ISA 3.0 (power9) vectors. */
3359 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3362 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3363 if (TARGET_DIRECT_MOVE_128)
3364 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3366 /* Support small integers in VSX registers. */
3367 if (TARGET_P8_VECTOR)
3369 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3370 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3371 if (TARGET_P9_VECTOR)
3373 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3374 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3378 /* Set up the reload helper and direct move functions. */
3379 if (TARGET_VSX || TARGET_ALTIVEC)
3381 if (TARGET_64BIT)
3383 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3384 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3385 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3386 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3387 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3388 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3389 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3390 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3391 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3392 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3393 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3394 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3395 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3396 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3397 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3398 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3399 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3400 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3401 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3402 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3404 if (FLOAT128_VECTOR_P (KFmode))
3406 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3407 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3410 if (FLOAT128_VECTOR_P (TFmode))
3412 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3413 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3416 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3417 available. */
3418 if (TARGET_NO_SDMODE_STACK)
3420 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3421 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3424 if (TARGET_VSX)
3426 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3427 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3430 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3432 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3433 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3434 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3435 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3436 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3437 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3438 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3439 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3440 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3442 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3443 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3444 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3445 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3446 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3447 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3448 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3449 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3450 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3452 if (FLOAT128_VECTOR_P (KFmode))
3454 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3455 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3458 if (FLOAT128_VECTOR_P (TFmode))
3460 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3461 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3465 else
3467 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3468 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3469 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3470 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3471 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3472 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3473 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3474 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3475 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3476 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3477 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3478 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3479 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3480 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3481 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3482 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3483 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3484 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3485 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3486 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3488 if (FLOAT128_VECTOR_P (KFmode))
3490 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3491 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3494 if (FLOAT128_IEEE_P (TFmode))
3496 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3497 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3500 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3501 available. */
3502 if (TARGET_NO_SDMODE_STACK)
3504 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3505 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3508 if (TARGET_VSX)
3510 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3511 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3514 if (TARGET_DIRECT_MOVE)
3516 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3517 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3518 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3522 reg_addr[DFmode].scalar_in_vmx_p = true;
3523 reg_addr[DImode].scalar_in_vmx_p = true;
3525 if (TARGET_P8_VECTOR)
3527 reg_addr[SFmode].scalar_in_vmx_p = true;
3528 reg_addr[SImode].scalar_in_vmx_p = true;
3530 if (TARGET_P9_VECTOR)
3532 reg_addr[HImode].scalar_in_vmx_p = true;
3533 reg_addr[QImode].scalar_in_vmx_p = true;
3538 /* Set up the fusion operations. */
3539 if (TARGET_P8_FUSION)
3541 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3542 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3543 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3544 if (TARGET_64BIT)
3545 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
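/* Illustrative sketch (not part of the original source): power8 fusion
   pairs an addis that builds the high 16 bits of an address with the
   dependent load, e.g.

       addis 9,2,.LC0@toc@ha
       lwz 10,.LC0@toc@l(9)

   and the CODE_FOR_fusion_gpr_load_* patterns registered above let the
   backend emit such adjacent pairs.  */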
3548 if (TARGET_P9_FUSION)
3550 struct fuse_insns {
3551 enum machine_mode mode; /* mode of the fused type. */
3552 enum machine_mode pmode; /* pointer mode. */
3553 enum rs6000_reload_reg_type rtype; /* register type. */
3554 enum insn_code load; /* load insn. */
3555 enum insn_code store; /* store insn. */
3558 static const struct fuse_insns addis_insns[] = {
3559 { E_SFmode, E_DImode, RELOAD_REG_FPR,
3560 CODE_FOR_fusion_vsx_di_sf_load,
3561 CODE_FOR_fusion_vsx_di_sf_store },
3563 { E_SFmode, E_SImode, RELOAD_REG_FPR,
3564 CODE_FOR_fusion_vsx_si_sf_load,
3565 CODE_FOR_fusion_vsx_si_sf_store },
3567 { E_DFmode, E_DImode, RELOAD_REG_FPR,
3568 CODE_FOR_fusion_vsx_di_df_load,
3569 CODE_FOR_fusion_vsx_di_df_store },
3571 { E_DFmode, E_SImode, RELOAD_REG_FPR,
3572 CODE_FOR_fusion_vsx_si_df_load,
3573 CODE_FOR_fusion_vsx_si_df_store },
3575 { E_DImode, E_DImode, RELOAD_REG_FPR,
3576 CODE_FOR_fusion_vsx_di_di_load,
3577 CODE_FOR_fusion_vsx_di_di_store },
3579 { E_DImode, E_SImode, RELOAD_REG_FPR,
3580 CODE_FOR_fusion_vsx_si_di_load,
3581 CODE_FOR_fusion_vsx_si_di_store },
3583 { E_QImode, E_DImode, RELOAD_REG_GPR,
3584 CODE_FOR_fusion_gpr_di_qi_load,
3585 CODE_FOR_fusion_gpr_di_qi_store },
3587 { E_QImode, E_SImode, RELOAD_REG_GPR,
3588 CODE_FOR_fusion_gpr_si_qi_load,
3589 CODE_FOR_fusion_gpr_si_qi_store },
3591 { E_HImode, E_DImode, RELOAD_REG_GPR,
3592 CODE_FOR_fusion_gpr_di_hi_load,
3593 CODE_FOR_fusion_gpr_di_hi_store },
3595 { E_HImode, E_SImode, RELOAD_REG_GPR,
3596 CODE_FOR_fusion_gpr_si_hi_load,
3597 CODE_FOR_fusion_gpr_si_hi_store },
3599 { E_SImode, E_DImode, RELOAD_REG_GPR,
3600 CODE_FOR_fusion_gpr_di_si_load,
3601 CODE_FOR_fusion_gpr_di_si_store },
3603 { E_SImode, E_SImode, RELOAD_REG_GPR,
3604 CODE_FOR_fusion_gpr_si_si_load,
3605 CODE_FOR_fusion_gpr_si_si_store },
3607 { E_SFmode, E_DImode, RELOAD_REG_GPR,
3608 CODE_FOR_fusion_gpr_di_sf_load,
3609 CODE_FOR_fusion_gpr_di_sf_store },
3611 { E_SFmode, E_SImode, RELOAD_REG_GPR,
3612 CODE_FOR_fusion_gpr_si_sf_load,
3613 CODE_FOR_fusion_gpr_si_sf_store },
3615 { E_DImode, E_DImode, RELOAD_REG_GPR,
3616 CODE_FOR_fusion_gpr_di_di_load,
3617 CODE_FOR_fusion_gpr_di_di_store },
3619 { E_DFmode, E_DImode, RELOAD_REG_GPR,
3620 CODE_FOR_fusion_gpr_di_df_load,
3621 CODE_FOR_fusion_gpr_di_df_store },
3624 machine_mode cur_pmode = Pmode;
3625 size_t i;
3627 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3629 machine_mode xmode = addis_insns[i].mode;
3630 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3632 if (addis_insns[i].pmode != cur_pmode)
3633 continue;
3635 if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
3636 continue;
3638 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3639 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3641 if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
3643 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3644 = addis_insns[i].load;
3645 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3646 = addis_insns[i].store;
3651 /* Note which types we support fusing a TOC setup plus a memory insn. We
3652 only generate fused TOCs for the medium and large code models. */
3653 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3654 && (TARGET_CMODEL != CMODEL_SMALL))
3656 reg_addr[QImode].fused_toc = true;
3657 reg_addr[HImode].fused_toc = true;
3658 reg_addr[SImode].fused_toc = true;
3659 reg_addr[DImode].fused_toc = true;
3660 if (TARGET_HARD_FLOAT)
3662 if (TARGET_SINGLE_FLOAT)
3663 reg_addr[SFmode].fused_toc = true;
3664 if (TARGET_DOUBLE_FLOAT)
3665 reg_addr[DFmode].fused_toc = true;
3669 /* Precalculate HARD_REGNO_NREGS. */
3670 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3671 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3672 rs6000_hard_regno_nregs[m][r]
3673 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3675 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3676 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3677 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3678 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3679 rs6000_hard_regno_mode_ok_p[m][r] = true;
3681 /* Precalculate CLASS_MAX_NREGS sizes. */
3682 for (c = 0; c < LIM_REG_CLASSES; ++c)
3684 int reg_size;
3686 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3687 reg_size = UNITS_PER_VSX_WORD;
3689 else if (c == ALTIVEC_REGS)
3690 reg_size = UNITS_PER_ALTIVEC_WORD;
3692 else if (c == FLOAT_REGS)
3693 reg_size = UNITS_PER_FP_WORD;
3695 else
3696 reg_size = UNITS_PER_WORD;
3698 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3700 machine_mode m2 = (machine_mode)m;
3701 int reg_size2 = reg_size;
3703 /* TDmode & IBM 128-bit floating point always takes 2 registers, even
3704 in VSX. */
3705 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3706 reg_size2 = UNITS_PER_FP_WORD;
3708 rs6000_class_max_nregs[m][c]
3709 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
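/* Worked example (illustration only): V4SImode is 16 bytes, so with
   FLOAT_REGS (reg_size == UNITS_PER_FP_WORD == 8) the ceiling division
   gives (16 + 8 - 1) / 8 = 2 registers, while with VSX_REGS
   (reg_size == UNITS_PER_VSX_WORD == 16) it gives exactly 1.  */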
3713 /* Calculate the modes for which to automatically generate code using the
3714 reciprocal divide and square root instructions. In the future, we could
3715 possibly generate these instructions automatically even if the user did
3716 not specify -mrecip. The older machines' double-precision reciprocal
3717 sqrt estimate is not accurate enough. */
3718 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3719 if (TARGET_FRES)
3720 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3721 if (TARGET_FRE)
3722 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3723 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3724 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3725 if (VECTOR_UNIT_VSX_P (V2DFmode))
3726 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3728 if (TARGET_FRSQRTES)
3729 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3730 if (TARGET_FRSQRTE)
3731 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3732 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3733 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3734 if (VECTOR_UNIT_VSX_P (V2DFmode))
3735 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3737 if (rs6000_recip_control)
3739 if (!flag_finite_math_only)
3740 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only",
3741 "-ffast-math");
3742 if (flag_trapping_math)
3743 warning (0, "%qs requires %qs or %qs", "-mrecip",
3744 "-fno-trapping-math", "-ffast-math");
3745 if (!flag_reciprocal_math)
3746 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3747 "-ffast-math");
3748 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3750 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3751 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3752 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3754 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3755 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3756 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3758 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3759 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3760 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3762 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3763 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3764 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3766 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3767 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3768 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3770 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3771 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3772 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3774 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3775 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3776 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3778 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3779 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3780 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
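/* Usage sketch (not in the original source): compiling with
   -mrecip=div,rsqrt -ffast-math on a cpu with fres/frsqrtes sets both
   the HAVE_* and AUTO_* bits above, so a/b can be expanded as a multiply
   by a reciprocal estimate refined with Newton-Raphson steps rather than
   a full divide.  */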
3784 /* Update the addr mask bits in reg_addr to help the secondary reload and
3785 legitimate address support figure out the appropriate addressing to
3786 use. */
3787 rs6000_setup_reg_addr_masks ();
3789 if (global_init_p || TARGET_DEBUG_TARGET)
3791 if (TARGET_DEBUG_REG)
3792 rs6000_debug_reg_global ();
3794 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3795 fprintf (stderr,
3796 "SImode variable mult cost = %d\n"
3797 "SImode constant mult cost = %d\n"
3798 "SImode short constant mult cost = %d\n"
3799 "DImode multipliciation cost = %d\n"
3800 "SImode division cost = %d\n"
3801 "DImode division cost = %d\n"
3802 "Simple fp operation cost = %d\n"
3803 "DFmode multiplication cost = %d\n"
3804 "SFmode division cost = %d\n"
3805 "DFmode division cost = %d\n"
3806 "cache line size = %d\n"
3807 "l1 cache size = %d\n"
3808 "l2 cache size = %d\n"
3809 "simultaneous prefetches = %d\n"
3810 "\n",
3811 rs6000_cost->mulsi,
3812 rs6000_cost->mulsi_const,
3813 rs6000_cost->mulsi_const9,
3814 rs6000_cost->muldi,
3815 rs6000_cost->divsi,
3816 rs6000_cost->divdi,
3817 rs6000_cost->fp,
3818 rs6000_cost->dmul,
3819 rs6000_cost->sdiv,
3820 rs6000_cost->ddiv,
3821 rs6000_cost->cache_line_size,
3822 rs6000_cost->l1_cache_size,
3823 rs6000_cost->l2_cache_size,
3824 rs6000_cost->simultaneous_prefetches);
3828 #if TARGET_MACHO
3829 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3831 static void
3832 darwin_rs6000_override_options (void)
3834 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3835 off. */
3836 rs6000_altivec_abi = 1;
3837 TARGET_ALTIVEC_VRSAVE = 1;
3838 rs6000_current_abi = ABI_DARWIN;
3840 if (DEFAULT_ABI == ABI_DARWIN
3841 && TARGET_64BIT)
3842 darwin_one_byte_bool = 1;
3844 if (TARGET_64BIT && ! TARGET_POWERPC64)
3846 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3847 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3849 if (flag_mkernel)
3851 rs6000_default_long_calls = 1;
3852 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3855 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3856 Altivec. */
3857 if (!flag_mkernel && !flag_apple_kext
3858 && TARGET_64BIT
3859 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3860 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3862 /* Unless the user (not the configurer) has explicitly overridden
3863 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
3864 G4 unless targeting the kernel. */
3865 if (!flag_mkernel
3866 && !flag_apple_kext
3867 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3868 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3869 && ! global_options_set.x_rs6000_cpu_index)
3871 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3874 #endif
3876 /* If not otherwise specified by a target, make 'long double' equivalent to
3877 'double'. */
3879 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3880 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3881 #endif
3883 /* Return the builtin mask of the various options used that could affect which
3884 builtins were used. In the past we used target_flags, but we've run out of
3885 bits, and some options like PAIRED are no longer in target_flags. */
3887 HOST_WIDE_INT
3888 rs6000_builtin_mask_calculate (void)
3890 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3891 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3892 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3893 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3894 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3895 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3896 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3897 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3898 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3899 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3900 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3901 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3902 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3903 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3904 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3905 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3906 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3907 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3908 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3909 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3910 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0));
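/* For instance (illustrative, not exhaustive): -mcpu=power8 on a 64-bit
   target typically yields a mask containing RS6000_BTM_ALTIVEC
   | RS6000_BTM_VSX | RS6000_BTM_P8_VECTOR | RS6000_BTM_64BIT among other
   bits, and that mask gates which built-in functions are available.  */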
3913 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3914 to clobber the XER[CA] bit because clobbering that bit without telling
3915 the compiler worked just fine with versions of GCC before GCC 5, and
3916 breaking a lot of older code in ways that are hard to track down is
3917 not such a great idea. */
3919 static rtx_insn *
3920 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3921 vec<const char *> &/*constraints*/,
3922 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3924 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3925 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3926 return NULL;
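/* Illustrative example (not part of the original source): older inline
   asm such as

       asm ("addic %0,%1,-1\n\tsubfe %0,%0,%1" : "=r" (x) : "r" (y));

   modifies XER[CA] via addic/subfe without declaring it; unconditionally
   adding CA_REGNO to the clobber set keeps such code correct.  */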
3929 /* Override command line options.
3931 Combine build-specific configuration information with options
3932 specified on the command line to set various state variables which
3933 influence code generation, optimization, and expansion of built-in
3934 functions. Assure that command-line configuration preferences are
3935 compatible with each other and with the build configuration; issue
3936 warnings while adjusting configuration or error messages while
3937 rejecting configuration.
3939 Upon entry to this function:
3941 This function is called once at the beginning of
3942 compilation, and then again at the start and end of compiling
3943 each section of code that has a different configuration, as
3944 indicated, for example, by adding the
3946 __attribute__((__target__("cpu=power9")))
3948 qualifier to a function definition or, for example, by bracketing
3949 code between
3951 #pragma GCC target("altivec")
3955 #pragma GCC reset_options
3957 directives. Parameter global_init_p is true for the initial
3958 invocation, which initializes global variables, and false for all
3959 subsequent invocations.
3962 Various global state information is assumed to be valid. This
3963 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3964 default CPU specified at build configure time, TARGET_DEFAULT,
3965 representing the default set of option flags for the default
3966 target, and global_options_set.x_rs6000_isa_flags, representing
3967 which options were requested on the command line.
3969 Upon return from this function:
3971 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3972 was set by name on the command line. Additionally, if certain
3973 attributes are automatically enabled or disabled by this function
3974 in order to assure compatibility between options and
3975 configuration, the flags associated with those attributes are
3976 also set. By setting these "explicit bits", we avoid the risk
3977 that other code might accidentally overwrite these particular
3978 attributes with "default values".
3980 The various bits of rs6000_isa_flags are set to indicate the
3981 target options that have been selected for the most current
3982 compilation efforts. This has the effect of also turning on the
3983 associated TARGET_XXX values since these are macros which are
3984 generally defined to test the corresponding bit of the
3985 rs6000_isa_flags variable.
3987 The variable rs6000_builtin_mask is set to represent the target
3988 options for the most current compilation efforts, consistent with
3989 the current contents of rs6000_isa_flags. This variable controls
3990 expansion of built-in functions.
3992 Various other global variables and fields of global structures
3993 (over 50 in all) are initialized to reflect the desired options
3994 for the most current compilation efforts. */
3996 static bool
3997 rs6000_option_override_internal (bool global_init_p)
3999 bool ret = true;
4000 bool have_cpu = false;
4002 /* The default cpu requested at configure time, if any. */
4003 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
4005 HOST_WIDE_INT set_masks;
4006 HOST_WIDE_INT ignore_masks;
4007 int cpu_index;
4008 int tune_index;
4009 struct cl_target_option *main_target_opt
4010 = ((global_init_p || target_option_default_node == NULL)
4011 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
4013 /* Print defaults. */
4014 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
4015 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
4017 /* Remember the explicit arguments. */
4018 if (global_init_p)
4019 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
4021 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
4022 library functions, so warn about it. The flag may be useful for
4023 performance studies from time to time though, so don't disable it
4024 entirely. */
4025 if (global_options_set.x_rs6000_alignment_flags
4026 && rs6000_alignment_flags == MASK_ALIGN_POWER
4027 && DEFAULT_ABI == ABI_DARWIN
4028 && TARGET_64BIT)
4029 warning (0, "%qs is not supported for 64-bit Darwin;"
4030 " it is incompatible with the installed C and C++ libraries",
4031 "-malign-power");
4033 /* Numerous experiments show that IRA-based loop pressure
4034 calculation works better for RTL loop invariant motion on targets
4035 with enough (>= 32) registers. It is an expensive optimization,
4036 so it is enabled only when optimizing for peak performance. */
4037 if (optimize >= 3 && global_init_p
4038 && !global_options_set.x_flag_ira_loop_pressure)
4039 flag_ira_loop_pressure = 1;
4041 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
4042 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
4043 options were already specified. */
4044 if (flag_sanitize & SANITIZE_USER_ADDRESS
4045 && !global_options_set.x_flag_asynchronous_unwind_tables)
4046 flag_asynchronous_unwind_tables = 1;
4048 /* Set the pointer size. */
4049 if (TARGET_64BIT)
4051 rs6000_pmode = DImode;
4052 rs6000_pointer_size = 64;
4054 else
4056 rs6000_pmode = SImode;
4057 rs6000_pointer_size = 32;
4060 /* Some OSs don't support saving the high part of 64-bit registers on context
4061 switch. Other OSs don't support saving Altivec registers. On those OSs,
4062 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
4063 if the user wants either, the user must explicitly specify them and we
4064 won't interfere with the user's specification. */
4066 set_masks = POWERPC_MASKS;
4067 #ifdef OS_MISSING_POWERPC64
4068 if (OS_MISSING_POWERPC64)
4069 set_masks &= ~OPTION_MASK_POWERPC64;
4070 #endif
4071 #ifdef OS_MISSING_ALTIVEC
4072 if (OS_MISSING_ALTIVEC)
4073 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
4074 | OTHER_VSX_VECTOR_MASKS);
4075 #endif
4077 /* Don't let the processor default override options given explicitly. */
4078 set_masks &= ~rs6000_isa_flags_explicit;
4080 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
4081 the cpu in a target attribute or pragma, but did not specify a tuning
4082 option, use the cpu for the tuning option rather than the option specified
4083 with -mtune on the command line. Process a '--with-cpu' configuration
4084 request as an implicit -mcpu. */
4085 if (rs6000_cpu_index >= 0)
4087 cpu_index = rs6000_cpu_index;
4088 have_cpu = true;
4090 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
4092 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
4093 have_cpu = true;
4095 else if (implicit_cpu)
4097 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
4098 have_cpu = true;
4100 else
4102 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4103 const char *default_cpu = ((!TARGET_POWERPC64)
4104 ? "powerpc"
4105 : ((BYTES_BIG_ENDIAN)
4106 ? "powerpc64"
4107 : "powerpc64le"));
4109 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4110 have_cpu = false;
4113 gcc_assert (cpu_index >= 0);
4115 if (have_cpu)
4117 #ifndef HAVE_AS_POWER9
4118 if (processor_target_table[rs6000_cpu_index].processor
4119 == PROCESSOR_POWER9)
4121 have_cpu = false;
4122 warning (0, "will not generate power9 instructions because "
4123 "assembler lacks power9 support");
4125 #endif
4126 #ifndef HAVE_AS_POWER8
4127 if (processor_target_table[rs6000_cpu_index].processor
4128 == PROCESSOR_POWER8)
4130 have_cpu = false;
4131 warning (0, "will not generate power8 instructions because "
4132 "assembler lacks power8 support");
4134 #endif
4135 #ifndef HAVE_AS_POPCNTD
4136 if (processor_target_table[rs6000_cpu_index].processor
4137 == PROCESSOR_POWER7)
4139 have_cpu = false;
4140 warning (0, "will not generate power7 instructions because "
4141 "assembler lacks power7 support");
4143 #endif
4144 #ifndef HAVE_AS_DFP
4145 if (processor_target_table[rs6000_cpu_index].processor
4146 == PROCESSOR_POWER6)
4148 have_cpu = false;
4149 warning (0, "will not generate power6 instructions because "
4150 "assembler lacks power6 support");
4152 #endif
4153 #ifndef HAVE_AS_POPCNTB
4154 if (processor_target_table[rs6000_cpu_index].processor
4155 == PROCESSOR_POWER5)
4157 have_cpu = false;
4158 warning (0, "will not generate power5 instructions because "
4159 "assembler lacks power5 support");
4161 #endif
4163 if (!have_cpu)
4165 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4166 const char *default_cpu = (!TARGET_POWERPC64
4167 ? "powerpc"
4168 : (BYTES_BIG_ENDIAN
4169 ? "powerpc64"
4170 : "powerpc64le"));
4172 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
4176 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
4177 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4178 with those from the cpu, except for options that were explicitly set. If
4179 we don't have a cpu, do not override the target bits set in
4180 TARGET_DEFAULT. */
4181 if (have_cpu)
4183 rs6000_isa_flags &= ~set_masks;
4184 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4185 & set_masks);
4187 else
4189 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4190 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4191 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Now that we use
4192 rs6000_isa_flags, we need to do the initialization here.
4194 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4195 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4196 HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
4197 : processor_target_table[cpu_index].target_enable);
4198 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
4201 if (rs6000_tune_index >= 0)
4202 tune_index = rs6000_tune_index;
4203 else if (have_cpu)
4204 rs6000_tune_index = tune_index = cpu_index;
4205 else
4207 size_t i;
4208 enum processor_type tune_proc
4209 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4211 tune_index = -1;
4212 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4213 if (processor_target_table[i].processor == tune_proc)
4215 rs6000_tune_index = tune_index = i;
4216 break;
4220 gcc_assert (tune_index >= 0);
4221 rs6000_cpu = processor_target_table[tune_index].processor;
4223 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4224 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4225 || rs6000_cpu == PROCESSOR_PPCE5500)
4227 if (TARGET_ALTIVEC)
4228 error ("AltiVec not supported in this target");
4231 /* If we are optimizing big endian systems for space, use the load/store
4232 multiple and string instructions. */
4233 if (BYTES_BIG_ENDIAN && optimize_size)
4234 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
4235 | OPTION_MASK_STRING);
4237 /* Don't allow -mmultiple or -mstring on little endian systems
4238 unless the cpu is a 750, because the hardware doesn't support the
4239 instructions used in little endian mode, and they cause an alignment
4240 trap. The 750 does not cause an alignment trap (except when the
4241 target is unaligned). */
4243 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
4245 if (TARGET_MULTIPLE)
4247 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4248 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4249 warning (0, "%qs is not supported on little endian systems",
4250 "-mmultiple");
4253 if (TARGET_STRING)
4255 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4256 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
4257 warning (0, "%qs is not supported on little endian systems",
4258 "-mstring");
4262 /* If little-endian, default to -mstrict-align on older processors.
4263 Testing for HTM matches power8 and later. */
4264 if (!BYTES_BIG_ENDIAN
4265 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4266 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4268 /* -maltivec={le,be} implies -maltivec. */
4269 if (rs6000_altivec_element_order != 0)
4270 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
4272 /* Disallow -maltivec=le in big endian mode for now. This is not
4273 known to be useful for anyone. */
4274 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
4276 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
4277 rs6000_altivec_element_order = 0;
4280 if (!rs6000_fold_gimple)
4281 fprintf (stderr,
4282 "gimple folding of rs6000 builtins has been disabled.\n");
4284 /* Add some warnings for VSX. */
4285 if (TARGET_VSX)
4287 const char *msg = NULL;
4288 if (!TARGET_HARD_FLOAT || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
4290 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4291 msg = N_("-mvsx requires hardware floating point");
4292 else
4294 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4295 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4298 else if (TARGET_PAIRED_FLOAT)
4299 msg = N_("-mvsx and -mpaired are incompatible");
4300 else if (TARGET_AVOID_XFORM > 0)
4301 msg = N_("-mvsx needs indexed addressing");
4302 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4303 & OPTION_MASK_ALTIVEC))
4305 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4306 msg = N_("-mvsx and -mno-altivec are incompatible");
4307 else
4308 msg = N_("-mno-altivec disables vsx");
4311 if (msg)
4313 warning (0, msg);
4314 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4315 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4319 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4320 the -mcpu setting to enable options that conflict. */
4321 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4322 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4323 | OPTION_MASK_ALTIVEC
4324 | OPTION_MASK_VSX)) != 0)
4325 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4326 | OPTION_MASK_DIRECT_MOVE)
4327 & ~rs6000_isa_flags_explicit);
4329 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4330 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4332 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4333 off all of the options that depend on those flags. */
4334 ignore_masks = rs6000_disable_incompatible_switches ();
4336 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4337 unless the user explicitly used -mno-<option> to disable the code. */
4338 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4339 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4340 else if (TARGET_P9_MINMAX)
4342 if (have_cpu)
4344 if (cpu_index == PROCESSOR_POWER9)
4346 /* Legacy behavior: allow -mcpu=power9 with certain
4347 capabilities explicitly disabled. */
4348 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4350 else
4351 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4352 "for <xxx> less than power9", "-mcpu");
4354 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4355 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4356 & rs6000_isa_flags_explicit))
4357 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4358 were explicitly cleared. */
4359 error ("%qs incompatible with explicitly disabled options",
4360 "-mpower9-minmax");
4361 else
4362 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4364 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4365 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4366 else if (TARGET_VSX)
4367 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4368 else if (TARGET_POPCNTD)
4369 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4370 else if (TARGET_DFP)
4371 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4372 else if (TARGET_CMPB)
4373 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4374 else if (TARGET_FPRND)
4375 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4376 else if (TARGET_POPCNTB)
4377 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4378 else if (TARGET_ALTIVEC)
4379 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4381 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4383 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4384 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4385 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4388 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4390 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4391 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4392 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4395 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4397 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4398 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4399 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4402 if (TARGET_P8_VECTOR && !TARGET_VSX)
4404 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4405 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4406 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4407 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4409 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4410 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4411 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4413 else
4415 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4416 not explicit. */
4417 rs6000_isa_flags |= OPTION_MASK_VSX;
4418 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4422 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4424 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4425 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4426 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4429 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4430 silently turn off quad memory mode. */
4431 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4433 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4434 warning (0, N_("-mquad-memory requires 64-bit mode"));
4436 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4437 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4439 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4440 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4443 /* Non-atomic quad memory load/store instructions are disabled for little
4444 endian, since the words are reversed, but atomic operations can still be
4445 done by swapping the words. */
4446 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4448 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4449 warning (0, N_("-mquad-memory is not available in little endian "
4450 "mode"));
4452 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4455 /* Assume that if the user asked for normal quad memory instructions, they
4456 want the atomic versions as well, unless they explicitly told us not to
4457 use quad word atomic instructions. */
4458 if (TARGET_QUAD_MEMORY
4459 && !TARGET_QUAD_MEMORY_ATOMIC
4460 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4461 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
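/* For reference (illustrative): TARGET_QUAD_MEMORY covers the lq/stq
   instructions while TARGET_QUAD_MEMORY_ATOMIC covers lqarx/stqcx., so a
   request for the former normally implies wanting the latter for 128-bit
   atomic sequences.  */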
4463 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4464 generating power8 instructions. */
4465 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4466 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4467 & OPTION_MASK_P8_FUSION);
4469 /* Setting additional fusion flags turns on base fusion. */
4470 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4472 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4474 if (TARGET_P8_FUSION_SIGN)
4475 error ("%qs requires %qs", "-mpower8-fusion-sign",
4476 "-mpower8-fusion");
4478 if (TARGET_TOC_FUSION)
4479 error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");
4481 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4483 else
4484 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4487 /* Power9 fusion is a superset over power8 fusion. */
4488 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4490 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4492 /* We prefer to not mention undocumented options in
4493 error messages. However, if users have managed to select
4494 power9-fusion without selecting power8-fusion, they
4495 already know about undocumented flags. */
4496 error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
4497 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4499 else
4500 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4503 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4504 generating power9 instructions. */
4505 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4506 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4507 & OPTION_MASK_P9_FUSION);
4509 /* Power8 does not fuse sign-extended loads with the addis. If we are
4510 optimizing at high levels for speed, convert a sign-extended load into a
4511 zero-extending load plus an explicit sign extension. */
4512 if (TARGET_P8_FUSION
4513 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4514 && optimize_function_for_speed_p (cfun)
4515 && optimize >= 3)
4516 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
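/* Sketch of the effect (not in the original source): instead of a
   sign-extending load that power8 cannot fuse, e.g.

       addis 9,2,sym@toc@ha ; lha 10,sym@toc@l(9)

   the backend emits the fusible addis/lhz pair followed by an explicit
   extsh to perform the sign extension.  */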
4518 /* TOC fusion requires 64-bit and medium/large code model. */
4519 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4521 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4522 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4523 warning (0, N_("-mtoc-fusion requires 64-bit"));
4526 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4528 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4529 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4530 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4533 /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
4534 model. */
4535 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4536 && (TARGET_CMODEL != CMODEL_SMALL)
4537 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4538 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4540 /* ISA 3.0 vector instructions include ISA 2.07. */
4541 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4543 /* We prefer to not mention undocumented options in
4544 error messages. However, if users have managed to select
4545 power9-vector without selecting power8-vector, they
4546 already know about undocumented flags. */
4547 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) &&
4548 (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4549 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4550 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4552 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4553 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4554 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4556 else
4558 /* OPTION_MASK_P9_VECTOR is explicit and
4559 OPTION_MASK_P8_VECTOR is not explicit. */
4560 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4561 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4565 /* Set -mallow-movmisalign explicitly on if we have full ISA 2.07
4566 support. If we only have ISA 2.06 support, and the user did not specify
4567 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4568 but we don't enable the full vectorization support. */
4569 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4570 TARGET_ALLOW_MOVMISALIGN = 1;
4572 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4574 if (TARGET_ALLOW_MOVMISALIGN > 0
4575 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4576 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4578 TARGET_ALLOW_MOVMISALIGN = 0;
4581 /* Determine when unaligned vector accesses are permitted, and when
4582 they are preferred over masked Altivec loads. Note that if
4583 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4584 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4585 not true. */
4586 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4588 if (!TARGET_VSX)
4590 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4591 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4593 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4596 else if (!TARGET_ALLOW_MOVMISALIGN)
4598 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4599 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4600 "-mallow-movmisalign");
4602 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4606 /* Set long double size before the IEEE 128-bit tests. */
4607 if (!global_options_set.x_rs6000_long_double_type_size)
4609 if (main_target_opt != NULL
4610 && (main_target_opt->x_rs6000_long_double_type_size
4611 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4612 error ("target attribute or pragma changes long double size");
4613 else
4614 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4617 /* Set -mabi=ieeelongdouble on some old targets. Note, AIX and Darwin
4618 explicitly redefine TARGET_IEEEQUAD to 0, so those systems will not
4619 pick up this default. */
4620 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
4621 if (!global_options_set.x_rs6000_ieeequad)
4622 rs6000_ieeequad = 1;
4623 #endif
4625 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4626 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4627 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4628 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4629 the keyword and the type. */
4630 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4632 /* IEEE 128-bit floating point requires VSX support. */
4633 if (TARGET_FLOAT128_KEYWORD)
4635 if (!TARGET_VSX)
4637 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4638 error ("%qs requires VSX support", "-mfloat128");
4640 TARGET_FLOAT128_TYPE = 0;
4641 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4642 | OPTION_MASK_FLOAT128_HW);
4644 else if (!TARGET_FLOAT128_TYPE)
4646 TARGET_FLOAT128_TYPE = 1;
4647 warning (0, "The -mfloat128 option may not be fully supported");
4651 /* Enable the __float128 keyword under Linux by default. */
4652 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4653 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4654 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
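/* Usage sketch (illustrative): with the keyword enabled, user code such
   as

       __float128 q = 1.0q;

   gets the IEEE 128-bit type; with -mfloat128-hardware on full ISA 3.0,
   the arithmetic uses quad-precision instructions (e.g. xsaddqp) instead
   of software emulation.  */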
4656 /* If we are supporting the float128 type and have full ISA 3.0 support,
4657 enable -mfloat128-hardware by default. However, don't enable the
4658 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4659 because the compiler sometimes wants to put values in an integer
4660 container, which is impossible without __int128 support. */
4661 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4662 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4663 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4664 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4666 if (TARGET_FLOAT128_HW
4667 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4669 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4670 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4672 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4675 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4677 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4678 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4680 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4683 /* Print the options after updating the defaults. */
4684 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4685 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4687 /* E500mc does "better" if we inline more aggressively. Respect the
4688 user's opinion, though. */
4689 if (rs6000_block_move_inline_limit == 0
4690 && (rs6000_cpu == PROCESSOR_PPCE500MC
4691 || rs6000_cpu == PROCESSOR_PPCE500MC64
4692 || rs6000_cpu == PROCESSOR_PPCE5500
4693 || rs6000_cpu == PROCESSOR_PPCE6500))
4694 rs6000_block_move_inline_limit = 128;
4696 /* store_one_arg depends on expand_block_move to handle at least the
4697 size of reg_parm_stack_space. */
4698 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4699 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
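/* E.g. (illustration only): on 64-bit targets this guarantees that
   expand_block_move will inline copies of at least 64 bytes, which is
   enough for store_one_arg's reg_parm_stack_space requirement.  */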
4701 if (global_init_p)
4703 /* If the appropriate debug option is enabled, replace the target hooks
4704 with debug versions that call the real version and then print
4705 debugging information. */
4706 if (TARGET_DEBUG_COST)
4708 targetm.rtx_costs = rs6000_debug_rtx_costs;
4709 targetm.address_cost = rs6000_debug_address_cost;
4710 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4713 if (TARGET_DEBUG_ADDR)
4715 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4716 targetm.legitimize_address = rs6000_debug_legitimize_address;
4717 rs6000_secondary_reload_class_ptr
4718 = rs6000_debug_secondary_reload_class;
4719 rs6000_secondary_memory_needed_ptr
4720 = rs6000_debug_secondary_memory_needed;
4721 rs6000_cannot_change_mode_class_ptr
4722 = rs6000_debug_cannot_change_mode_class;
4723 rs6000_preferred_reload_class_ptr
4724 = rs6000_debug_preferred_reload_class;
4725 rs6000_legitimize_reload_address_ptr
4726 = rs6000_debug_legitimize_reload_address;
4727 rs6000_mode_dependent_address_ptr
4728 = rs6000_debug_mode_dependent_address;
4731 if (rs6000_veclibabi_name)
4733 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4734 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4735 else
4737 error ("unknown vectorization library ABI type (%qs) for "
4738 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4739 ret = false;
4744 /* Silently disable VSX and AltiVec if the user switched cpus to power7 in
4745 a target attribute or pragma (which automatically enables both options),
4746 unless the AltiVec ABI was set. That ABI is set by default for 64-bit,
4747 but not for 32-bit. */
4748 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4750 TARGET_FLOAT128_TYPE = 0;
4751 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4752 | OPTION_MASK_FLOAT128_KEYWORD)
4753 & ~rs6000_isa_flags_explicit);
4756 /* Enable Altivec ABI for AIX -maltivec. */
4757 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4759 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4760 error ("target attribute or pragma changes AltiVec ABI");
4761 else
4762 rs6000_altivec_abi = 1;
4765 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4766 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4767 be explicitly overridden in either case. */
4768 if (TARGET_ELF)
4770 if (!global_options_set.x_rs6000_altivec_abi
4771 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4773 if (main_target_opt != NULL &&
4774 !main_target_opt->x_rs6000_altivec_abi)
4775 error ("target attribute or pragma changes AltiVec ABI");
4776 else
4777 rs6000_altivec_abi = 1;
4781 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4782 So far, the only darwin64 targets are also Mach-O. */
4783 if (TARGET_MACHO
4784 && DEFAULT_ABI == ABI_DARWIN
4785 && TARGET_64BIT)
4787 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4788 error ("target attribute or pragma changes darwin64 ABI");
4789 else
4791 rs6000_darwin64_abi = 1;
4792 /* Default to natural alignment, for better performance. */
4793 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4797 /* Place FP constants in the constant pool instead of TOC
4798 if section anchors enabled. */
4799 if (flag_section_anchors
4800 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4801 TARGET_NO_FP_IN_TOC = 1;
4803 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4804 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4806 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4807 SUBTARGET_OVERRIDE_OPTIONS;
4808 #endif
4809 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4810 SUBSUBTARGET_OVERRIDE_OPTIONS;
4811 #endif
4812 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4813 SUB3TARGET_OVERRIDE_OPTIONS;
4814 #endif
4816 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4817 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4819 /* For the E500 family of cores, reset the single/double FP flags to let us
4820 check that they remain constant across attributes or pragmas. Also,
4821 clear any request for string instructions, which are not supported and
4822 which we might have silently enabled above for -Os.
4824 For other families, clear ISEL in case it was set implicitly. */
4827 switch (rs6000_cpu)
4829 case PROCESSOR_PPC8540:
4830 case PROCESSOR_PPC8548:
4831 case PROCESSOR_PPCE500MC:
4832 case PROCESSOR_PPCE500MC64:
4833 case PROCESSOR_PPCE5500:
4834 case PROCESSOR_PPCE6500:
4836 rs6000_single_float = 0;
4837 rs6000_double_float = 0;
4839 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4841 break;
4843 default:
4845 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
4846 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
4848 break;
4851 if (main_target_opt)
4853 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4854 error ("target attribute or pragma changes single precision floating "
4855 "point");
4856 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4857 error ("target attribute or pragma changes double precision floating "
4858 "point");
4861 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
4862 && rs6000_cpu != PROCESSOR_POWER5
4863 && rs6000_cpu != PROCESSOR_POWER6
4864 && rs6000_cpu != PROCESSOR_POWER7
4865 && rs6000_cpu != PROCESSOR_POWER8
4866 && rs6000_cpu != PROCESSOR_POWER9
4867 && rs6000_cpu != PROCESSOR_PPCA2
4868 && rs6000_cpu != PROCESSOR_CELL
4869 && rs6000_cpu != PROCESSOR_PPC476);
4870 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
4871 || rs6000_cpu == PROCESSOR_POWER5
4872 || rs6000_cpu == PROCESSOR_POWER7
4873 || rs6000_cpu == PROCESSOR_POWER8);
4874 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
4875 || rs6000_cpu == PROCESSOR_POWER5
4876 || rs6000_cpu == PROCESSOR_POWER6
4877 || rs6000_cpu == PROCESSOR_POWER7
4878 || rs6000_cpu == PROCESSOR_POWER8
4879 || rs6000_cpu == PROCESSOR_POWER9
4880 || rs6000_cpu == PROCESSOR_PPCE500MC
4881 || rs6000_cpu == PROCESSOR_PPCE500MC64
4882 || rs6000_cpu == PROCESSOR_PPCE5500
4883 || rs6000_cpu == PROCESSOR_PPCE6500);
4885 /* Allow debug switches to override the above settings. These are set to -1
4886 in rs6000.opt to indicate the user hasn't directly set the switch. */
4887 if (TARGET_ALWAYS_HINT >= 0)
4888 rs6000_always_hint = TARGET_ALWAYS_HINT;
4890 if (TARGET_SCHED_GROUPS >= 0)
4891 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4893 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4894 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4896 rs6000_sched_restricted_insns_priority
4897 = (rs6000_sched_groups ? 1 : 0);
4899 /* Handle -msched-costly-dep option. */
4900 rs6000_sched_costly_dep
4901 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4903 if (rs6000_sched_costly_dep_str)
4905 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4906 rs6000_sched_costly_dep = no_dep_costly;
4907 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4908 rs6000_sched_costly_dep = all_deps_costly;
4909 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4910 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4911 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4912 rs6000_sched_costly_dep = store_to_load_dep_costly;
4913 else
4914 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4915 atoi (rs6000_sched_costly_dep_str));
4918 /* Handle -minsert-sched-nops option. */
4919 rs6000_sched_insert_nops
4920 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4922 if (rs6000_sched_insert_nops_str)
4924 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4925 rs6000_sched_insert_nops = sched_finish_none;
4926 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4927 rs6000_sched_insert_nops = sched_finish_pad_groups;
4928 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4929 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4930 else
4931 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4932 atoi (rs6000_sched_insert_nops_str));
4935 /* Handle stack protector */
4936 if (!global_options_set.x_rs6000_stack_protector_guard)
4937 #ifdef TARGET_THREAD_SSP_OFFSET
4938 rs6000_stack_protector_guard = SSP_TLS;
4939 #else
4940 rs6000_stack_protector_guard = SSP_GLOBAL;
4941 #endif
4943 #ifdef TARGET_THREAD_SSP_OFFSET
4944 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4945 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4946 #endif
4948 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4950 char *endp;
4951 const char *str = rs6000_stack_protector_guard_offset_str;
4953 errno = 0;
4954 long offset = strtol (str, &endp, 0);
4955 if (!*str || *endp || errno)
4956 error ("%qs is not a valid number in %qs", str,
4957 "-mstack-protector-guard-offset=");
4959 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4960 || (TARGET_64BIT && (offset & 3)))
4961 error ("%qs is not a valid offset in %qs", str,
4962 "-mstack-protector-guard-offset=");
4964 rs6000_stack_protector_guard_offset = offset;
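/* Accepted forms (illustrative): -mstack-protector-guard-offset=0x7010
   or =-4.  strtol with base 0 parses decimal, octal and hex, and the
   IN_RANGE check keeps the offset within a signed 16-bit displacement
   (additionally word-aligned in 64-bit mode, as DS-form ld/std
   require).  */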
4967 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4969 const char *str = rs6000_stack_protector_guard_reg_str;
4970 int reg = decode_reg_name (str);
4972 if (!IN_RANGE (reg, 1, 31))
4973 error ("%qs is not a valid base register in %qs", str,
4974 "-mstack-protector-guard-reg=");
4976 rs6000_stack_protector_guard_reg = reg;
4979 if (rs6000_stack_protector_guard == SSP_TLS
4980 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4981 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4983 if (global_init_p)
4985 #ifdef TARGET_REGNAMES
4986 /* If the user desires alternate register names, copy in the
4987 alternate names now. */
4988 if (TARGET_REGNAMES)
4989 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4990 #endif
4992 /* Set aix_struct_return last, after the ABI is determined.
4993 If -maix-struct-return or -msvr4-struct-return was explicitly
4994 used, don't override with the ABI default. */
4995 if (!global_options_set.x_aix_struct_return)
4996 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4998 #if 0
4999 /* IBM XL compiler defaults to unsigned bitfields. */
5000 if (TARGET_XL_COMPAT)
5001 flag_signed_bitfields = 0;
5002 #endif
5004 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
5005 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
5007 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
5009 /* We can only guarantee the availability of DI pseudo-ops when
5010 assembling for 64-bit targets. */
5011 if (!TARGET_64BIT)
5013 targetm.asm_out.aligned_op.di = NULL;
5014 targetm.asm_out.unaligned_op.di = NULL;
5018 /* Set branch target alignment, if not optimizing for size. */
5019 if (!optimize_size)
5021 /* Cell wants to be aligned 8-byte for dual issue. Titan wants to be
5022 aligned 8-byte to avoid misprediction by the branch predictor. */
5023 if (rs6000_cpu == PROCESSOR_TITAN
5024 || rs6000_cpu == PROCESSOR_CELL)
5026 if (align_functions <= 0)
5027 align_functions = 8;
5028 if (align_jumps <= 0)
5029 align_jumps = 8;
5030 if (align_loops <= 0)
5031 align_loops = 8;
5033 if (rs6000_align_branch_targets)
5035 if (align_functions <= 0)
5036 align_functions = 16;
5037 if (align_jumps <= 0)
5038 align_jumps = 16;
5039 if (align_loops <= 0)
5041 can_override_loop_align = 1;
5042 align_loops = 16;
5045 if (align_jumps_max_skip <= 0)
5046 align_jumps_max_skip = 15;
5047 if (align_loops_max_skip <= 0)
5048 align_loops_max_skip = 15;
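/* Example: on a cpu for which rs6000_align_branch_targets is set, and with
   no explicit -falign-* options, the code above selects 16-byte alignment
   for functions, jumps and loops, padding with at most 15 bytes.  */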
5051 /* Arrange to save and restore machine status around nested functions. */
5052 init_machine_status = rs6000_init_machine_status;
5054 /* We should always be splitting complex arguments, but we can't break
5055 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
5056 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
5057 targetm.calls.split_complex_arg = NULL;
5059 /* The AIX and ELFv1 ABIs define standard function descriptors. */
5060 if (DEFAULT_ABI == ABI_AIX)
5061 targetm.calls.custom_function_descriptors = 0;
5064 /* Initialize rs6000_cost with the appropriate target costs. */
5065 if (optimize_size)
5066 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
5067 else
5068 switch (rs6000_cpu)
5070 case PROCESSOR_RS64A:
5071 rs6000_cost = &rs64a_cost;
5072 break;
5074 case PROCESSOR_MPCCORE:
5075 rs6000_cost = &mpccore_cost;
5076 break;
5078 case PROCESSOR_PPC403:
5079 rs6000_cost = &ppc403_cost;
5080 break;
5082 case PROCESSOR_PPC405:
5083 rs6000_cost = &ppc405_cost;
5084 break;
5086 case PROCESSOR_PPC440:
5087 rs6000_cost = &ppc440_cost;
5088 break;
5090 case PROCESSOR_PPC476:
5091 rs6000_cost = &ppc476_cost;
5092 break;
5094 case PROCESSOR_PPC601:
5095 rs6000_cost = &ppc601_cost;
5096 break;
5098 case PROCESSOR_PPC603:
5099 rs6000_cost = &ppc603_cost;
5100 break;
5102 case PROCESSOR_PPC604:
5103 rs6000_cost = &ppc604_cost;
5104 break;
5106 case PROCESSOR_PPC604e:
5107 rs6000_cost = &ppc604e_cost;
5108 break;
5110 case PROCESSOR_PPC620:
5111 rs6000_cost = &ppc620_cost;
5112 break;
5114 case PROCESSOR_PPC630:
5115 rs6000_cost = &ppc630_cost;
5116 break;
5118 case PROCESSOR_CELL:
5119 rs6000_cost = &ppccell_cost;
5120 break;
5122 case PROCESSOR_PPC750:
5123 case PROCESSOR_PPC7400:
5124 rs6000_cost = &ppc750_cost;
5125 break;
5127 case PROCESSOR_PPC7450:
5128 rs6000_cost = &ppc7450_cost;
5129 break;
5131 case PROCESSOR_PPC8540:
5132 case PROCESSOR_PPC8548:
5133 rs6000_cost = &ppc8540_cost;
5134 break;
5136 case PROCESSOR_PPCE300C2:
5137 case PROCESSOR_PPCE300C3:
5138 rs6000_cost = &ppce300c2c3_cost;
5139 break;
5141 case PROCESSOR_PPCE500MC:
5142 rs6000_cost = &ppce500mc_cost;
5143 break;
5145 case PROCESSOR_PPCE500MC64:
5146 rs6000_cost = &ppce500mc64_cost;
5147 break;
5149 case PROCESSOR_PPCE5500:
5150 rs6000_cost = &ppce5500_cost;
5151 break;
5153 case PROCESSOR_PPCE6500:
5154 rs6000_cost = &ppce6500_cost;
5155 break;
5157 case PROCESSOR_TITAN:
5158 rs6000_cost = &titan_cost;
5159 break;
5161 case PROCESSOR_POWER4:
5162 case PROCESSOR_POWER5:
5163 rs6000_cost = &power4_cost;
5164 break;
5166 case PROCESSOR_POWER6:
5167 rs6000_cost = &power6_cost;
5168 break;
5170 case PROCESSOR_POWER7:
5171 rs6000_cost = &power7_cost;
5172 break;
5174 case PROCESSOR_POWER8:
5175 rs6000_cost = &power8_cost;
5176 break;
5178 case PROCESSOR_POWER9:
5179 rs6000_cost = &power9_cost;
5180 break;
5182 case PROCESSOR_PPCA2:
5183 rs6000_cost = &ppca2_cost;
5184 break;
5186 default:
5187 gcc_unreachable ();
5190 if (global_init_p)
5192 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5193 rs6000_cost->simultaneous_prefetches,
5194 global_options.x_param_values,
5195 global_options_set.x_param_values);
5196 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5197 global_options.x_param_values,
5198 global_options_set.x_param_values);
5199 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5200 rs6000_cost->cache_line_size,
5201 global_options.x_param_values,
5202 global_options_set.x_param_values);
5203 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5204 global_options.x_param_values,
5205 global_options_set.x_param_values);
5207 /* Increase loop peeling limits based on performance analysis. */
5208 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5209 global_options.x_param_values,
5210 global_options_set.x_param_values);
5211 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5212 global_options.x_param_values,
5213 global_options_set.x_param_values);
5215 /* Use the 'model' -fsched-pressure algorithm by default. */
5216 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5217 SCHED_PRESSURE_MODEL,
5218 global_options.x_param_values,
5219 global_options_set.x_param_values);
5221 /* If using typedef char *va_list, signal that
5222 __builtin_va_start (&ap, 0) can be optimized to
5223 ap = __builtin_next_arg (0). */
5224 if (DEFAULT_ABI != ABI_V4)
5225 targetm.expand_builtin_va_start = NULL;
5228 /* Set up single/double float flags.
5229 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
5230 then set both flags. */
5231 if (TARGET_HARD_FLOAT && rs6000_single_float == 0 && rs6000_double_float == 0)
5232 rs6000_single_float = rs6000_double_float = 1;
5234 /* If not explicitly specified via option, decide whether to generate indexed
5235 load/store instructions. A value of -1 indicates that the
5236 initial value of this variable has not been overwritten. During
5237 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
5238 if (TARGET_AVOID_XFORM == -1)
5239 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5240 DERAT mispredict penalty. However the LVE and STVE altivec instructions
5241 need indexed accesses and the type used is the scalar type of the element
5242 being loaded or stored. */
5243 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
5244 && !TARGET_ALTIVEC);
5246 /* Set the -mrecip options. */
5247 if (rs6000_recip_name)
5249 char *p = ASTRDUP (rs6000_recip_name);
5250 char *q;
5251 unsigned int mask, i;
5252 bool invert;
5254 while ((q = strtok (p, ",")) != NULL)
5256 p = NULL;
5257 if (*q == '!')
5259 invert = true;
5260 q++;
5262 else
5263 invert = false;
5265 if (!strcmp (q, "default"))
5266 mask = ((TARGET_RECIP_PRECISION)
5267 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5268 else
5270 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5271 if (!strcmp (q, recip_options[i].string))
5273 mask = recip_options[i].mask;
5274 break;
5277 if (i == ARRAY_SIZE (recip_options))
5279 error ("unknown option for %<%s=%s%>", "-mrecip", q);
5280 invert = false;
5281 mask = 0;
5282 ret = false;
5286 if (invert)
5287 rs6000_recip_control &= ~mask;
5288 else
5289 rs6000_recip_control |= mask;
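/* Example (option names taken from the recip_options table):
   -mrecip=default,!rsqrtd first ORs in the default mask (high or low
   precision depending on TARGET_RECIP_PRECISION) and then clears the
   double-precision rsqrt bit.  */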
5293 /* Set the builtin mask of the various options used that could affect which
5294 builtins were used. In the past we used target_flags, but we've run out
5295 of bits, and some options like PAIRED are no longer in target_flags. */
5296 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5297 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5298 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5299 rs6000_builtin_mask);
5301 /* Initialize all of the registers. */
5302 rs6000_init_hard_regno_mode_ok (global_init_p);
5304 /* Save the initial options in case the user uses function-specific options. */
5305 if (global_init_p)
5306 target_option_default_node = target_option_current_node
5307 = build_target_option_node (&global_options);
5309 /* If not explicitly specified via option, decide whether to generate the
5310 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
5311 if (TARGET_LINK_STACK == -1)
5312 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
5314 return ret;
5317 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5318 define the target cpu type. */
5320 static void
5321 rs6000_option_override (void)
5323 (void) rs6000_option_override_internal (true);
5327 /* Implement targetm.vectorize.builtin_mask_for_load. */
5328 static tree
5329 rs6000_builtin_mask_for_load (void)
5331 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5332 if ((TARGET_ALTIVEC && !TARGET_VSX)
5333 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5334 return altivec_builtin_mask_for_load;
5335 else
5336 return 0;
5339 /* Implement LOOP_ALIGN. */
5340 int
5341 rs6000_loop_align (rtx label)
5343 basic_block bb;
5344 int ninsns;
5346 /* Don't override loop alignment if -falign-loops was specified. */
5347 if (!can_override_loop_align)
5348 return align_loops_log;
5350 bb = BLOCK_FOR_INSN (label);
5351 ninsns = num_loop_insns (bb->loop_father);
5353 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return the default. */
5354 if (ninsns > 4 && ninsns <= 8
5355 && (rs6000_cpu == PROCESSOR_POWER4
5356 || rs6000_cpu == PROCESSOR_POWER5
5357 || rs6000_cpu == PROCESSOR_POWER6
5358 || rs6000_cpu == PROCESSOR_POWER7
5359 || rs6000_cpu == PROCESSOR_POWER8
5360 || rs6000_cpu == PROCESSOR_POWER9))
5361 return 5;
5362 else
5363 return align_loops_log;
5366 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5367 static int
5368 rs6000_loop_align_max_skip (rtx_insn *label)
5370 return (1 << rs6000_loop_align (label)) - 1;
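/* E.g. if rs6000_loop_align returned 5 (a 32-byte boundary), the maximum
   skip is (1 << 5) - 1 = 31, i.e. align only when at most 31 padding
   bytes are needed.  */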
5373 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5374 after applying N iterations. This routine does not determine
5375 how many iterations are required to reach the desired alignment. */
5377 static bool
5378 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5380 if (is_packed)
5381 return false;
5383 if (TARGET_32BIT)
5385 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5386 return true;
5388 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5389 return true;
5391 return false;
5393 else
5395 if (TARGET_MACHO)
5396 return false;
5398 /* Assume that all other types are naturally aligned. CHECKME! */
5399 return true;
5403 /* Return true if the vector misalignment factor is supported by the
5404 target. */
5405 static bool
5406 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5407 const_tree type,
5408 int misalignment,
5409 bool is_packed)
5411 if (TARGET_VSX)
5413 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5414 return true;
5416 /* Return false if the movmisalign pattern is not supported for this mode. */
5417 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5418 return false;
5420 if (misalignment == -1)
5422 /* Misalignment factor is unknown at compile time but we know
5423 it's word aligned. */
5424 if (rs6000_vector_alignment_reachable (type, is_packed))
5426 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5428 if (element_size == 64 || element_size == 32)
5429 return true;
5432 return false;
5435 /* VSX supports word-aligned vectors. */
5436 if (misalignment % 4 == 0)
5437 return true;
5439 return false;
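/* Example: on VSX without TARGET_EFFICIENT_UNALIGNED_VSX, a known
   misalignment of 4 or 8 bytes is accepted (word aligned), whereas a
   misalignment of 2 makes this hook return false.  */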
5442 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5443 static int
5444 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5445 tree vectype, int misalign)
5447 unsigned elements;
5448 tree elem_type;
5450 switch (type_of_cost)
5452 case scalar_stmt:
5453 case scalar_load:
5454 case scalar_store:
5455 case vector_stmt:
5456 case vector_load:
5457 case vector_store:
5458 case vec_to_scalar:
5459 case scalar_to_vec:
5460 case cond_branch_not_taken:
5461 return 1;
5463 case vec_perm:
5464 if (TARGET_VSX)
5465 return 3;
5466 else
5467 return 1;
5469 case vec_promote_demote:
5470 if (TARGET_VSX)
5471 return 4;
5472 else
5473 return 1;
5475 case cond_branch_taken:
5476 return 3;
5478 case unaligned_load:
5479 if (TARGET_P9_VECTOR)
5480 return 3;
5482 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5483 return 1;
5485 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5487 elements = TYPE_VECTOR_SUBPARTS (vectype);
5488 if (elements == 2)
5489 /* Double word aligned. */
5490 return 2;
5492 if (elements == 4)
5494 switch (misalign)
5496 case 8:
5497 /* Double word aligned. */
5498 return 2;
5500 case -1:
5501 /* Unknown misalignment. */
5502 case 4:
5503 case 12:
5504 /* Word aligned. */
5505 return 22;
5507 default:
5508 gcc_unreachable ();
5513 if (TARGET_ALTIVEC)
5514 /* Misaligned loads are not supported. */
5515 gcc_unreachable ();
5517 return 2;
5519 case unaligned_store:
5520 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5521 return 1;
5523 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5525 elements = TYPE_VECTOR_SUBPARTS (vectype);
5526 if (elements == 2)
5527 /* Double word aligned. */
5528 return 2;
5530 if (elements == 4)
5532 switch (misalign)
5534 case 8:
5535 /* Double word aligned. */
5536 return 2;
5538 case -1:
5539 /* Unknown misalignment. */
5540 case 4:
5541 case 12:
5542 /* Word aligned. */
5543 return 23;
5545 default:
5546 gcc_unreachable ();
5551 if (TARGET_ALTIVEC)
5552 /* Misaligned stores are not supported. */
5553 gcc_unreachable ();
5555 return 2;
5557 case vec_construct:
5558 /* This is a rough approximation assuming non-constant elements
5559 constructed into a vector via element insertion. FIXME:
5560 vec_construct is not granular enough for uniformly good
5561 decisions. If the initialization is a splat, this is
5562 cheaper than we estimate. Improve this someday. */
5563 elem_type = TREE_TYPE (vectype);
5564 /* 32-bit vectors loaded into registers are stored as double
5565 precision, so we need 2 permutes, 2 converts, and 1 merge
5566 to construct a vector of short floats from them. */
5567 if (SCALAR_FLOAT_TYPE_P (elem_type)
5568 && TYPE_PRECISION (elem_type) == 32)
5569 return 5;
5570 /* On POWER9, integer vector types are built up in GPRs and then
5571 use a direct move (2 cycles). For POWER8 this is even worse,
5572 as we need two direct moves and a merge, and the direct moves
5573 are five cycles. */
5574 else if (INTEGRAL_TYPE_P (elem_type))
5576 if (TARGET_P9_VECTOR)
5577 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5578 else
5579 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5581 else
5582 /* V2DFmode doesn't need a direct move. */
5583 return 2;
5585 default:
5586 gcc_unreachable ();
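/* Worked example of the vec_construct case above: building a V4SImode
   vector from four distinct GPR values costs 4 - 1 + 2 = 5 on POWER9
   (TARGET_P9_VECTOR) but 4 - 1 + 5 = 8 otherwise, reflecting the cheaper
   direct moves on POWER9.  */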
5590 /* Implement targetm.vectorize.preferred_simd_mode. */
5592 static machine_mode
5593 rs6000_preferred_simd_mode (scalar_mode mode)
5595 if (TARGET_VSX)
5596 switch (mode)
5598 case E_DFmode:
5599 return V2DFmode;
5600 default:;
5602 if (TARGET_ALTIVEC || TARGET_VSX)
5603 switch (mode)
5605 case E_SFmode:
5606 return V4SFmode;
5607 case E_TImode:
5608 return V1TImode;
5609 case E_DImode:
5610 return V2DImode;
5611 case E_SImode:
5612 return V4SImode;
5613 case E_HImode:
5614 return V8HImode;
5615 case E_QImode:
5616 return V16QImode;
5617 default:;
5619 if (TARGET_PAIRED_FLOAT
5620 && mode == SFmode)
5621 return V2SFmode;
5622 return word_mode;
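/* Example: with VSX, DFmode loops vectorize as V2DFmode; with only
   AltiVec, SFmode maps to V4SFmode while DFmode falls through to
   word_mode, i.e. no vectorization for double.  */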
5625 typedef struct _rs6000_cost_data
5627 struct loop *loop_info;
5628 unsigned cost[3];
5629 } rs6000_cost_data;
5631 /* Test for likely overcommitment of vector hardware resources. If a
5632 loop iteration is relatively large, and too large a percentage of
5633 instructions in the loop are vectorized, the cost model may not
5634 adequately reflect delays from unavailable vector resources.
5635 Penalize the loop body cost for this case. */
5637 static void
5638 rs6000_density_test (rs6000_cost_data *data)
5640 const int DENSITY_PCT_THRESHOLD = 85;
5641 const int DENSITY_SIZE_THRESHOLD = 70;
5642 const int DENSITY_PENALTY = 10;
5643 struct loop *loop = data->loop_info;
5644 basic_block *bbs = get_loop_body (loop);
5645 int nbbs = loop->num_nodes;
5646 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5647 int i, density_pct;
5649 for (i = 0; i < nbbs; i++)
5651 basic_block bb = bbs[i];
5652 gimple_stmt_iterator gsi;
5654 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5656 gimple *stmt = gsi_stmt (gsi);
5657 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5659 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5660 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5661 not_vec_cost++;
5665 free (bbs);
5666 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5668 if (density_pct > DENSITY_PCT_THRESHOLD
5669 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5671 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5672 if (dump_enabled_p ())
5673 dump_printf_loc (MSG_NOTE, vect_location,
5674 "density %d%%, cost %d exceeds threshold, penalizing "
5675 "loop body cost by %d%%", density_pct,
5676 vec_cost + not_vec_cost, DENSITY_PENALTY);
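/* Worked example: with vec_cost == 90 and not_vec_cost == 5 the density is
   90 * 100 / 95 = 94%, and the total size 95 exceeds 70, so the body cost
   is scaled to 90 * 110 / 100 = 99.  */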
5680 /* Implement targetm.vectorize.init_cost. */
5682 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5683 instruction is needed by the vectorization. */
5684 static bool rs6000_vect_nonmem;
5686 static void *
5687 rs6000_init_cost (struct loop *loop_info)
5689 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5690 data->loop_info = loop_info;
5691 data->cost[vect_prologue] = 0;
5692 data->cost[vect_body] = 0;
5693 data->cost[vect_epilogue] = 0;
5694 rs6000_vect_nonmem = false;
5695 return data;
5698 /* Implement targetm.vectorize.add_stmt_cost. */
5700 static unsigned
5701 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5702 struct _stmt_vec_info *stmt_info, int misalign,
5703 enum vect_cost_model_location where)
5705 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5706 unsigned retval = 0;
5708 if (flag_vect_cost_model)
5710 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5711 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5712 misalign);
5713 /* Statements in an inner loop relative to the loop being
5714 vectorized are weighted more heavily. The value here is
5715 arbitrary and could potentially be improved with analysis. */
5716 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5717 count *= 50; /* FIXME. */
5719 retval = (unsigned) (count * stmt_cost);
5720 cost_data->cost[where] += retval;
5722 /* Check whether we're doing something other than just a copy loop.
5723 Not all such loops may be profitably vectorized; see
5724 rs6000_finish_cost. */
5725 if ((kind == vec_to_scalar || kind == vec_perm
5726 || kind == vec_promote_demote || kind == vec_construct
5727 || kind == scalar_to_vec)
5728 || (where == vect_body && kind == vector_stmt))
5729 rs6000_vect_nonmem = true;
5732 return retval;
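/* Example: a vector_stmt in the loop body has cost 1, but if it sits in an
   inner loop the weighting above multiplies COUNT by 50, so count == 2
   contributes 100 to cost_data->cost[vect_body].  */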
5735 /* Implement targetm.vectorize.finish_cost. */
5737 static void
5738 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5739 unsigned *body_cost, unsigned *epilogue_cost)
5741 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5743 if (cost_data->loop_info)
5744 rs6000_density_test (cost_data);
5746 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5747 that require versioning for any reason. The vectorization is at
5748 best a wash inside the loop, and the versioning checks make
5749 profitability highly unlikely and potentially quite harmful. */
5750 if (cost_data->loop_info)
5752 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5753 if (!rs6000_vect_nonmem
5754 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5755 && LOOP_REQUIRES_VERSIONING (vec_info))
5756 cost_data->cost[vect_body] += 10000;
5759 *prologue_cost = cost_data->cost[vect_prologue];
5760 *body_cost = cost_data->cost[vect_body];
5761 *epilogue_cost = cost_data->cost[vect_epilogue];
5764 /* Implement targetm.vectorize.destroy_cost_data. */
5766 static void
5767 rs6000_destroy_cost_data (void *data)
5769 free (data);
5772 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5773 library with vectorized intrinsics. */
5775 static tree
5776 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5777 tree type_in)
5779 char name[32];
5780 const char *suffix = NULL;
5781 tree fntype, new_fndecl, bdecl = NULL_TREE;
5782 int n_args = 1;
5783 const char *bname;
5784 machine_mode el_mode, in_mode;
5785 int n, in_n;
5787 /* Libmass is suitable for unsafe math only as it does not correctly support
5788 parts of IEEE with the required precision such as denormals. Only support
5789 it if we have VSX to use the simd d2 or f4 functions.
5790 XXX: Add variable length support. */
5791 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5792 return NULL_TREE;
5794 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5795 n = TYPE_VECTOR_SUBPARTS (type_out);
5796 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5797 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5798 if (el_mode != in_mode
5799 || n != in_n)
5800 return NULL_TREE;
5802 switch (fn)
5804 CASE_CFN_ATAN2:
5805 CASE_CFN_HYPOT:
5806 CASE_CFN_POW:
5807 n_args = 2;
5808 gcc_fallthrough ();
5810 CASE_CFN_ACOS:
5811 CASE_CFN_ACOSH:
5812 CASE_CFN_ASIN:
5813 CASE_CFN_ASINH:
5814 CASE_CFN_ATAN:
5815 CASE_CFN_ATANH:
5816 CASE_CFN_CBRT:
5817 CASE_CFN_COS:
5818 CASE_CFN_COSH:
5819 CASE_CFN_ERF:
5820 CASE_CFN_ERFC:
5821 CASE_CFN_EXP2:
5822 CASE_CFN_EXP:
5823 CASE_CFN_EXPM1:
5824 CASE_CFN_LGAMMA:
5825 CASE_CFN_LOG10:
5826 CASE_CFN_LOG1P:
5827 CASE_CFN_LOG2:
5828 CASE_CFN_LOG:
5829 CASE_CFN_SIN:
5830 CASE_CFN_SINH:
5831 CASE_CFN_SQRT:
5832 CASE_CFN_TAN:
5833 CASE_CFN_TANH:
5834 if (el_mode == DFmode && n == 2)
5836 bdecl = mathfn_built_in (double_type_node, fn);
5837 suffix = "d2"; /* pow -> powd2 */
5839 else if (el_mode == SFmode && n == 4)
5841 bdecl = mathfn_built_in (float_type_node, fn);
5842 suffix = "4"; /* powf -> powf4 */
5844 else
5845 return NULL_TREE;
5846 if (!bdecl)
5847 return NULL_TREE;
5848 break;
5850 default:
5851 return NULL_TREE;
5854 gcc_assert (suffix != NULL);
5855 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5856 if (!bname)
5857 return NULL_TREE;
5859 strcpy (name, bname + sizeof ("__builtin_") - 1);
5860 strcat (name, suffix);
5862 if (n_args == 1)
5863 fntype = build_function_type_list (type_out, type_in, NULL);
5864 else if (n_args == 2)
5865 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5866 else
5867 gcc_unreachable ();
5869 /* Build a function declaration for the vectorized function. */
5870 new_fndecl = build_decl (BUILTINS_LOCATION,
5871 FUNCTION_DECL, get_identifier (name), fntype);
5872 TREE_PUBLIC (new_fndecl) = 1;
5873 DECL_EXTERNAL (new_fndecl) = 1;
5874 DECL_IS_NOVOPS (new_fndecl) = 1;
5875 TREE_READONLY (new_fndecl) = 1;
5877 return new_fndecl;
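/* Usage sketch (assumed invocation): with -mveclibabi=mass and
   -funsafe-math-optimizations, vectorizing a loop over pow () for V2DFmode
   makes this handler build a decl named "powd2" (see the suffix logic
   above), which the vectorizer then calls in place of the scalar pow.  */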
5880 /* Returns a function decl for a vectorized version of the builtin function
5881 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5882 if it is not available. */
5884 static tree
5885 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5886 tree type_in)
5888 machine_mode in_mode, out_mode;
5889 int in_n, out_n;
5891 if (TARGET_DEBUG_BUILTIN)
5892 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5893 combined_fn_name (combined_fn (fn)),
5894 GET_MODE_NAME (TYPE_MODE (type_out)),
5895 GET_MODE_NAME (TYPE_MODE (type_in)));
5897 if (TREE_CODE (type_out) != VECTOR_TYPE
5898 || TREE_CODE (type_in) != VECTOR_TYPE)
5899 return NULL_TREE;
5901 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5902 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5903 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5904 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5906 switch (fn)
5908 CASE_CFN_COPYSIGN:
5909 if (VECTOR_UNIT_VSX_P (V2DFmode)
5910 && out_mode == DFmode && out_n == 2
5911 && in_mode == DFmode && in_n == 2)
5912 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5913 if (VECTOR_UNIT_VSX_P (V4SFmode)
5914 && out_mode == SFmode && out_n == 4
5915 && in_mode == SFmode && in_n == 4)
5916 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5917 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5918 && out_mode == SFmode && out_n == 4
5919 && in_mode == SFmode && in_n == 4)
5920 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5921 break;
5922 CASE_CFN_CEIL:
5923 if (VECTOR_UNIT_VSX_P (V2DFmode)
5924 && out_mode == DFmode && out_n == 2
5925 && in_mode == DFmode && in_n == 2)
5926 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5927 if (VECTOR_UNIT_VSX_P (V4SFmode)
5928 && out_mode == SFmode && out_n == 4
5929 && in_mode == SFmode && in_n == 4)
5930 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5931 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5932 && out_mode == SFmode && out_n == 4
5933 && in_mode == SFmode && in_n == 4)
5934 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5935 break;
5936 CASE_CFN_FLOOR:
5937 if (VECTOR_UNIT_VSX_P (V2DFmode)
5938 && out_mode == DFmode && out_n == 2
5939 && in_mode == DFmode && in_n == 2)
5940 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5941 if (VECTOR_UNIT_VSX_P (V4SFmode)
5942 && out_mode == SFmode && out_n == 4
5943 && in_mode == SFmode && in_n == 4)
5944 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5945 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5946 && out_mode == SFmode && out_n == 4
5947 && in_mode == SFmode && in_n == 4)
5948 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5949 break;
5950 CASE_CFN_FMA:
5951 if (VECTOR_UNIT_VSX_P (V2DFmode)
5952 && out_mode == DFmode && out_n == 2
5953 && in_mode == DFmode && in_n == 2)
5954 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5955 if (VECTOR_UNIT_VSX_P (V4SFmode)
5956 && out_mode == SFmode && out_n == 4
5957 && in_mode == SFmode && in_n == 4)
5958 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5959 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5960 && out_mode == SFmode && out_n == 4
5961 && in_mode == SFmode && in_n == 4)
5962 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5963 break;
5964 CASE_CFN_TRUNC:
5965 if (VECTOR_UNIT_VSX_P (V2DFmode)
5966 && out_mode == DFmode && out_n == 2
5967 && in_mode == DFmode && in_n == 2)
5968 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5969 if (VECTOR_UNIT_VSX_P (V4SFmode)
5970 && out_mode == SFmode && out_n == 4
5971 && in_mode == SFmode && in_n == 4)
5972 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5973 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5974 && out_mode == SFmode && out_n == 4
5975 && in_mode == SFmode && in_n == 4)
5976 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5977 break;
5978 CASE_CFN_NEARBYINT:
5979 if (VECTOR_UNIT_VSX_P (V2DFmode)
5980 && flag_unsafe_math_optimizations
5981 && out_mode == DFmode && out_n == 2
5982 && in_mode == DFmode && in_n == 2)
5983 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5984 if (VECTOR_UNIT_VSX_P (V4SFmode)
5985 && flag_unsafe_math_optimizations
5986 && out_mode == SFmode && out_n == 4
5987 && in_mode == SFmode && in_n == 4)
5988 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5989 break;
5990 CASE_CFN_RINT:
5991 if (VECTOR_UNIT_VSX_P (V2DFmode)
5992 && !flag_trapping_math
5993 && out_mode == DFmode && out_n == 2
5994 && in_mode == DFmode && in_n == 2)
5995 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5996 if (VECTOR_UNIT_VSX_P (V4SFmode)
5997 && !flag_trapping_math
5998 && out_mode == SFmode && out_n == 4
5999 && in_mode == SFmode && in_n == 4)
6000 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
6001 break;
6002 default:
6003 break;
6006 /* Generate calls to libmass if appropriate. */
6007 if (rs6000_veclib_handler)
6008 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
6010 return NULL_TREE;
6013 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
6015 static tree
6016 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
6017 tree type_in)
6019 machine_mode in_mode, out_mode;
6020 int in_n, out_n;
6022 if (TARGET_DEBUG_BUILTIN)
6023 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
6024 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
6025 GET_MODE_NAME (TYPE_MODE (type_out)),
6026 GET_MODE_NAME (TYPE_MODE (type_in)));
6028 if (TREE_CODE (type_out) != VECTOR_TYPE
6029 || TREE_CODE (type_in) != VECTOR_TYPE)
6030 return NULL_TREE;
6032 out_mode = TYPE_MODE (TREE_TYPE (type_out));
6033 out_n = TYPE_VECTOR_SUBPARTS (type_out);
6034 in_mode = TYPE_MODE (TREE_TYPE (type_in));
6035 in_n = TYPE_VECTOR_SUBPARTS (type_in);
6037 enum rs6000_builtins fn
6038 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
6039 switch (fn)
6041 case RS6000_BUILTIN_RSQRTF:
6042 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6043 && out_mode == SFmode && out_n == 4
6044 && in_mode == SFmode && in_n == 4)
6045 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
6046 break;
6047 case RS6000_BUILTIN_RSQRT:
6048 if (VECTOR_UNIT_VSX_P (V2DFmode)
6049 && out_mode == DFmode && out_n == 2
6050 && in_mode == DFmode && in_n == 2)
6051 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
6052 break;
6053 case RS6000_BUILTIN_RECIPF:
6054 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
6055 && out_mode == SFmode && out_n == 4
6056 && in_mode == SFmode && in_n == 4)
6057 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
6058 break;
6059 case RS6000_BUILTIN_RECIP:
6060 if (VECTOR_UNIT_VSX_P (V2DFmode)
6061 && out_mode == DFmode && out_n == 2
6062 && in_mode == DFmode && in_n == 2)
6063 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
6064 break;
6065 default:
6066 break;
6068 return NULL_TREE;
6071 /* Default CPU string for rs6000*_file_start functions. */
6072 static const char *rs6000_default_cpu;
6074 /* Do anything needed at the start of the asm file. */
6076 static void
6077 rs6000_file_start (void)
6079 char buffer[80];
6080 const char *start = buffer;
6081 FILE *file = asm_out_file;
6083 rs6000_default_cpu = TARGET_CPU_DEFAULT;
6085 default_file_start ();
6087 if (flag_verbose_asm)
6089 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
6091 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6093 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6094 start = "";
6097 if (global_options_set.x_rs6000_cpu_index)
6099 fprintf (file, "%s -mcpu=%s", start,
6100 processor_target_table[rs6000_cpu_index].name);
6101 start = "";
6104 if (global_options_set.x_rs6000_tune_index)
6106 fprintf (file, "%s -mtune=%s", start,
6107 processor_target_table[rs6000_tune_index].name);
6108 start = "";
6111 if (PPC405_ERRATUM77)
6113 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6114 start = "";
6117 #ifdef USING_ELFOS_H
6118 switch (rs6000_sdata)
6120 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6121 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6122 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6123 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6126 if (rs6000_sdata && g_switch_value)
6128 fprintf (file, "%s -G %d", start,
6129 g_switch_value);
6130 start = "";
6132 #endif
6134 if (*start == '\0')
6135 putc ('\n', file);
6138 #ifdef USING_ELFOS_H
6139 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6140 && !global_options_set.x_rs6000_cpu_index)
6142 fputs ("\t.machine ", asm_out_file);
6143 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6144 fputs ("power9\n", asm_out_file);
6145 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6146 fputs ("power8\n", asm_out_file);
6147 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6148 fputs ("power7\n", asm_out_file);
6149 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6150 fputs ("power6\n", asm_out_file);
6151 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6152 fputs ("power5\n", asm_out_file);
6153 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6154 fputs ("power4\n", asm_out_file);
6155 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6156 fputs ("ppc64\n", asm_out_file);
6157 else
6158 fputs ("ppc\n", asm_out_file);
6160 #endif
6162 if (DEFAULT_ABI == ABI_ELFv2)
6163 fprintf (file, "\t.abiversion 2\n");
6167 /* Return nonzero if this function is known to have a null epilogue. */
6169 int
6170 direct_return (void)
6172 if (reload_completed)
6174 rs6000_stack_t *info = rs6000_stack_info ();
6176 if (info->first_gp_reg_save == 32
6177 && info->first_fp_reg_save == 64
6178 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6179 && ! info->lr_save_p
6180 && ! info->cr_save_p
6181 && info->vrsave_size == 0
6182 && ! info->push_p)
6183 return 1;
6186 return 0;
6189 /* Return the number of instructions it takes to form a constant in an
6190 integer register. */
6192 int
6193 num_insns_constant_wide (HOST_WIDE_INT value)
6195 /* signed constant loadable with addi */
6196 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6197 return 1;
6199 /* constant loadable with addis */
6200 else if ((value & 0xffff) == 0
6201 && (value >> 31 == -1 || value >> 31 == 0))
6202 return 1;
6204 else if (TARGET_POWERPC64)
6206 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6207 HOST_WIDE_INT high = value >> 31;
6209 if (high == 0 || high == -1)
6210 return 2;
6212 high >>= 1;
6214 if (low == 0)
6215 return num_insns_constant_wide (high) + 1;
6216 else if (high == 0)
6217 return num_insns_constant_wide (low) + 1;
6218 else
6219 return (num_insns_constant_wide (high)
6220 + num_insns_constant_wide (low) + 1);
6223 else
6224 return 2;
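/* Worked examples: 0x7000 loads in one addi; 0x12340000 in one addis;
   0x12345678 needs addis+ori (2 insns); and a full 64-bit constant such
   as 0x1234567890abcdef needs 5 insns: two per 32-bit half plus one
   combining step, per the recursion above.  */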
6227 int
6228 num_insns_constant (rtx op, machine_mode mode)
6230 HOST_WIDE_INT low, high;
6232 switch (GET_CODE (op))
6234 case CONST_INT:
6235 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6236 && rs6000_is_valid_and_mask (op, mode))
6237 return 2;
6238 else
6239 return num_insns_constant_wide (INTVAL (op));
6241 case CONST_WIDE_INT:
6243 int i;
6244 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6245 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6246 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6247 return ins;
6250 case CONST_DOUBLE:
6251 if (mode == SFmode || mode == SDmode)
6253 long l;
6255 if (DECIMAL_FLOAT_MODE_P (mode))
6256 REAL_VALUE_TO_TARGET_DECIMAL32
6257 (*CONST_DOUBLE_REAL_VALUE (op), l);
6258 else
6259 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6260 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6263 long l[2];
6264 if (DECIMAL_FLOAT_MODE_P (mode))
6265 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6266 else
6267 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6268 high = l[WORDS_BIG_ENDIAN == 0];
6269 low = l[WORDS_BIG_ENDIAN != 0];
6271 if (TARGET_32BIT)
6272 return (num_insns_constant_wide (low)
6273 + num_insns_constant_wide (high));
6274 else
6276 if ((high == 0 && low >= 0)
6277 || (high == -1 && low < 0))
6278 return num_insns_constant_wide (low);
6280 else if (rs6000_is_valid_and_mask (op, mode))
6281 return 2;
6283 else if (low == 0)
6284 return num_insns_constant_wide (high) + 1;
6286 else
6287 return (num_insns_constant_wide (high)
6288 + num_insns_constant_wide (low) + 1);
6291 default:
6292 gcc_unreachable ();
6296 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6297 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6298 corresponding element of the vector, but for V4SFmode and V2SFmode,
6299 the corresponding "float" is interpreted as an SImode integer. */
6301 HOST_WIDE_INT
6302 const_vector_elt_as_int (rtx op, unsigned int elt)
6304 rtx tmp;
6306 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6307 gcc_assert (GET_MODE (op) != V2DImode
6308 && GET_MODE (op) != V2DFmode);
6310 tmp = CONST_VECTOR_ELT (op, elt);
6311 if (GET_MODE (op) == V4SFmode
6312 || GET_MODE (op) == V2SFmode)
6313 tmp = gen_lowpart (SImode, tmp);
6314 return INTVAL (tmp);
6317 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6318 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6319 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6320 all items are set to the same value and contain COPIES replicas of the
6321 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6322 operand and the others are set to the value of the operand's msb. */
6324 static bool
6325 vspltis_constant (rtx op, unsigned step, unsigned copies)
6327 machine_mode mode = GET_MODE (op);
6328 machine_mode inner = GET_MODE_INNER (mode);
6330 unsigned i;
6331 unsigned nunits;
6332 unsigned bitsize;
6333 unsigned mask;
6335 HOST_WIDE_INT val;
6336 HOST_WIDE_INT splat_val;
6337 HOST_WIDE_INT msb_val;
6339 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6340 return false;
6342 nunits = GET_MODE_NUNITS (mode);
6343 bitsize = GET_MODE_BITSIZE (inner);
6344 mask = GET_MODE_MASK (inner);
6346 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6347 splat_val = val;
6348 msb_val = val >= 0 ? 0 : -1;
6350 /* Construct the value to be splatted, if possible. If not, return 0. */
6351 for (i = 2; i <= copies; i *= 2)
6353 HOST_WIDE_INT small_val;
6354 bitsize /= 2;
6355 small_val = splat_val >> bitsize;
6356 mask >>= bitsize;
6357 if (splat_val != ((HOST_WIDE_INT)
6358 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6359 | (small_val & mask)))
6360 return false;
6361 splat_val = small_val;
6364 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6365 if (EASY_VECTOR_15 (splat_val))
6366 ;
6368 /* Also check if we can splat, and then add the result to itself. Do so if
6369 the value is positive, or if the splat instruction is using OP's mode;
6370 for splat_val < 0, the splat and the add should use the same mode. */
6371 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6372 && (splat_val >= 0 || (step == 1 && copies == 1)))
6373 ;
6375 /* Also check if we are loading up the most significant bit which can be done by
6376 loading up -1 and shifting the value left by -1. */
6377 else if (EASY_VECTOR_MSB (splat_val, inner))
6378 ;
6380 else
6381 return false;
6383 /* Check if VAL is present in every STEP-th element, and the
6384 other elements are filled with its most significant bit. */
6385 for (i = 1; i < nunits; ++i)
6387 HOST_WIDE_INT desired_val;
6388 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6389 if ((i & (step - 1)) == 0)
6390 desired_val = val;
6391 else
6392 desired_val = msb_val;
6394 if (desired_val != const_vector_elt_as_int (op, elt))
6395 return false;
6398 return true;
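/* Example: a V8HImode constant with every element equal to 3 succeeds
   with step == 1 and copies == 1 (a single vspltish 3); a V16QImode
   constant of sixteen 5s likewise succeeds once the caller steps down to
   the byte form (vspltisb 5).  */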
6401 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6402 instruction, filling in the bottom elements with 0 or -1.
6404 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6405 for the number of zeroes to shift in, or negative for the number of 0xff
6406 bytes to shift in.
6408 OP is a CONST_VECTOR. */
6410 int
6411 vspltis_shifted (rtx op)
6413 machine_mode mode = GET_MODE (op);
6414 machine_mode inner = GET_MODE_INNER (mode);
6416 unsigned i, j;
6417 unsigned nunits;
6418 unsigned mask;
6420 HOST_WIDE_INT val;
6422 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6423 return false;
6425 /* We need to create pseudo registers to do the shift, so don't recognize
6426 shift vector constants after reload. */
6427 if (!can_create_pseudo_p ())
6428 return false;
6430 nunits = GET_MODE_NUNITS (mode);
6431 mask = GET_MODE_MASK (inner);
6433 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6435 /* Check if the value can really be the operand of a vspltis[bhw]. */
6436 if (EASY_VECTOR_15 (val))
6437 ;
6439 /* Also check if we are loading up the most significant bit which can be done
6440 by loading up -1 and shifting the value left by -1. */
6441 else if (EASY_VECTOR_MSB (val, inner))
6442 ;
6444 else
6445 return 0;
6447 /* Check if VAL is present in every STEP-th element until we find elements
6448 that are 0 or all 1 bits. */
6449 for (i = 1; i < nunits; ++i)
6451 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6452 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6454 /* If the value isn't the splat value, check for the remaining elements
6455 being 0/-1. */
6456 if (val != elt_val)
6458 if (elt_val == 0)
6460 for (j = i+1; j < nunits; ++j)
6462 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6463 if (const_vector_elt_as_int (op, elt2) != 0)
6464 return 0;
6467 return (nunits - i) * GET_MODE_SIZE (inner);
6470 else if ((elt_val & mask) == mask)
6472 for (j = i+1; j < nunits; ++j)
6474 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6475 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6476 return 0;
6479 return -((nunits - i) * GET_MODE_SIZE (inner));
6482 else
6483 return 0;
6487 /* If all elements are equal, we don't need to do VSLDOI. */
6488 return 0;
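/* Example: the V4SImode constant { 5, 0, 0, 0 } (big-endian element order)
   can be built as vspltisw 5 followed by a VSLDOI shifting in 12 zero
   bytes, so the function returns (4 - 1) * 4 = 12.  */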
6492 /* Return true if OP is of the given MODE and can be synthesized
6493 with a vspltisb, vspltish or vspltisw. */
6495 bool
6496 easy_altivec_constant (rtx op, machine_mode mode)
6498 unsigned step, copies;
6500 if (mode == VOIDmode)
6501 mode = GET_MODE (op);
6502 else if (mode != GET_MODE (op))
6503 return false;
6505 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6506 constants. */
6507 if (mode == V2DFmode)
6508 return zero_constant (op, mode);
6510 else if (mode == V2DImode)
6512 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6513 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6514 return false;
6516 if (zero_constant (op, mode))
6517 return true;
6519 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6520 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6521 return true;
6523 return false;
6526 /* V1TImode is a special container for TImode. Ignore for now. */
6527 else if (mode == V1TImode)
6528 return false;
6530 /* Start with a vspltisw. */
6531 step = GET_MODE_NUNITS (mode) / 4;
6532 copies = 1;
6534 if (vspltis_constant (op, step, copies))
6535 return true;
6537 /* Then try with a vspltish. */
6538 if (step == 1)
6539 copies <<= 1;
6540 else
6541 step >>= 1;
6543 if (vspltis_constant (op, step, copies))
6544 return true;
6546 /* And finally a vspltisb. */
6547 if (step == 1)
6548 copies <<= 1;
6549 else
6550 step >>= 1;
6552 if (vspltis_constant (op, step, copies))
6553 return true;
6555 if (vspltis_shifted (op) != 0)
6556 return true;
6558 return false;
6561 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6562 result is OP. Abort if it is not possible. */
6564 rtx
6565 gen_easy_altivec_constant (rtx op)
6567 machine_mode mode = GET_MODE (op);
6568 int nunits = GET_MODE_NUNITS (mode);
6569 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6570 unsigned step = nunits / 4;
6571 unsigned copies = 1;
6573 /* Start with a vspltisw. */
6574 if (vspltis_constant (op, step, copies))
6575 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6577 /* Then try with a vspltish. */
6578 if (step == 1)
6579 copies <<= 1;
6580 else
6581 step >>= 1;
6583 if (vspltis_constant (op, step, copies))
6584 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6586 /* And finally a vspltisb. */
6587 if (step == 1)
6588 copies <<= 1;
6589 else
6590 step >>= 1;
6592 if (vspltis_constant (op, step, copies))
6593 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6595 gcc_unreachable ();
6598 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6599 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6601 Return the number of instructions needed (1 or 2) into the address pointed
6602 via NUM_INSNS_PTR.
6604 Return the constant that is being split via CONSTANT_PTR. */
6606 bool
6607 xxspltib_constant_p (rtx op,
6608 machine_mode mode,
6609 int *num_insns_ptr,
6610 int *constant_ptr)
6612 size_t nunits = GET_MODE_NUNITS (mode);
6613 size_t i;
6614 HOST_WIDE_INT value;
6615 rtx element;
6617 /* Set the returned values to out-of-bounds values. */
6618 *num_insns_ptr = -1;
6619 *constant_ptr = 256;
6621 if (!TARGET_P9_VECTOR)
6622 return false;
6624 if (mode == VOIDmode)
6625 mode = GET_MODE (op);
6627 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6628 return false;
6630 /* Handle (vec_duplicate <constant>). */
6631 if (GET_CODE (op) == VEC_DUPLICATE)
6633 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6634 && mode != V2DImode)
6635 return false;
6637 element = XEXP (op, 0);
6638 if (!CONST_INT_P (element))
6639 return false;
6641 value = INTVAL (element);
6642 if (!IN_RANGE (value, -128, 127))
6643 return false;
6646 /* Handle (const_vector [...]). */
6647 else if (GET_CODE (op) == CONST_VECTOR)
6649 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6650 && mode != V2DImode)
6651 return false;
6653 element = CONST_VECTOR_ELT (op, 0);
6654 if (!CONST_INT_P (element))
6655 return false;
6657 value = INTVAL (element);
6658 if (!IN_RANGE (value, -128, 127))
6659 return false;
6661 for (i = 1; i < nunits; i++)
6663 element = CONST_VECTOR_ELT (op, i);
6664 if (!CONST_INT_P (element))
6665 return false;
6667 if (value != INTVAL (element))
6668 return false;
6672 /* Handle integer constants being loaded into the upper part of the VSX
6673 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6674 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6675 else if (CONST_INT_P (op))
6677 if (!SCALAR_INT_MODE_P (mode))
6678 return false;
6680 value = INTVAL (op);
6681 if (!IN_RANGE (value, -128, 127))
6682 return false;
6684 if (!IN_RANGE (value, -1, 0))
6686 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6687 return false;
6689 if (EASY_VECTOR_15 (value))
6690 return false;
6694 else
6695 return false;
6697 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6698 sign extend. Special case 0/-1 to allow getting any VSX register instead
6699 of an Altivec register. */
6700 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6701 && EASY_VECTOR_15 (value))
6702 return false;
6704 /* Return # of instructions and the constant byte for XXSPLTIB. */
6705 if (mode == V16QImode)
6706 *num_insns_ptr = 1;
6708 else if (IN_RANGE (value, -1, 0))
6709 *num_insns_ptr = 1;
6711 else
6712 *num_insns_ptr = 2;
6714 *constant_ptr = (int) value;
6715 return true;
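/* Examples: a V16QImode splat of 0x42 needs 1 insn (xxspltib alone); a
   V4SImode splat of 100 needs 2 insns (xxspltib plus a sign extend such
   as vextsb2w); a V4SImode splat of 5 returns false, since vspltisw is
   preferred for values in the -16..15 range.  */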
6718 const char *
6719 output_vec_const_move (rtx *operands)
6721 int shift;
6722 machine_mode mode;
6723 rtx dest, vec;
6725 dest = operands[0];
6726 vec = operands[1];
6727 mode = GET_MODE (dest);
6729 if (TARGET_VSX)
6731 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6732 int xxspltib_value = 256;
6733 int num_insns = -1;
6735 if (zero_constant (vec, mode))
6737 if (TARGET_P9_VECTOR)
6738 return "xxspltib %x0,0";
6740 else if (dest_vmx_p)
6741 return "vspltisw %0,0";
6743 else
6744 return "xxlxor %x0,%x0,%x0";
6747 if (all_ones_constant (vec, mode))
6749 if (TARGET_P9_VECTOR)
6750 return "xxspltib %x0,255";
6752 else if (dest_vmx_p)
6753 return "vspltisw %0,-1";
6755 else if (TARGET_P8_VECTOR)
6756 return "xxlorc %x0,%x0,%x0";
6758 else
6759 gcc_unreachable ();
6762 if (TARGET_P9_VECTOR
6763 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6765 if (num_insns == 1)
6767 operands[2] = GEN_INT (xxspltib_value & 0xff);
6768 return "xxspltib %x0,%2";
6771 return "#";
6775 if (TARGET_ALTIVEC)
6777 rtx splat_vec;
6779 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6780 if (zero_constant (vec, mode))
6781 return "vspltisw %0,0";
6783 if (all_ones_constant (vec, mode))
6784 return "vspltisw %0,-1";
6786 /* Do we need to construct a value using VSLDOI? */
6787 shift = vspltis_shifted (vec);
6788 if (shift != 0)
6789 return "#";
6791 splat_vec = gen_easy_altivec_constant (vec);
6792 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6793 operands[1] = XEXP (splat_vec, 0);
6794 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6795 return "#";
6797 switch (GET_MODE (splat_vec))
6799 case E_V4SImode:
6800 return "vspltisw %0,%1";
6802 case E_V8HImode:
6803 return "vspltish %0,%1";
6805 case E_V16QImode:
6806 return "vspltisb %0,%1";
6808 default:
6809 gcc_unreachable ();
6813 gcc_unreachable ();
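/* Example outputs: an all-zero vector becomes "xxspltib %x0,0" on ISA 3.0,
   "vspltisw %0,0" in an AltiVec register, or "xxlxor %x0,%x0,%x0"
   otherwise; constants that need a VSLDOI come back as "#" so that the
   insn is split later.  */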
6816 /* Initialize the paired-float vector TARGET to VALS. */
6818 void
6819 paired_expand_vector_init (rtx target, rtx vals)
6821 machine_mode mode = GET_MODE (target);
6822 int n_elts = GET_MODE_NUNITS (mode);
6823 int n_var = 0;
6824 rtx x, new_rtx, tmp, constant_op, op1, op2;
6825 int i;
6827 for (i = 0; i < n_elts; ++i)
6829 x = XVECEXP (vals, 0, i);
6830 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6831 ++n_var;
6833 if (n_var == 0)
6835 /* Load from constant pool. */
6836 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6837 return;
6840 if (n_var == 2)
6842 /* The vector is initialized only with non-constants. */
6843 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6844 XVECEXP (vals, 0, 1));
6846 emit_move_insn (target, new_rtx);
6847 return;
6850 /* One field is non-constant and the other one is a constant. Load the
6851 constant from the constant pool and use ps_merge instruction to
6852 construct the whole vector. */
6853 op1 = XVECEXP (vals, 0, 0);
6854 op2 = XVECEXP (vals, 0, 1);
6856 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6858 tmp = gen_reg_rtx (GET_MODE (constant_op));
6859 emit_move_insn (tmp, constant_op);
6861 if (CONSTANT_P (op1))
6862 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6863 else
6864 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6866 emit_move_insn (target, new_rtx);
6869 void
6870 paired_expand_vector_move (rtx operands[])
6872 rtx op0 = operands[0], op1 = operands[1];
6874 emit_move_insn (op0, op1);
6877 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
6878 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
6879 operands for the relation operation COND. This is a recursive
6880 function. */
6882 static void
6883 paired_emit_vector_compare (enum rtx_code rcode,
6884 rtx dest, rtx op0, rtx op1,
6885 rtx cc_op0, rtx cc_op1)
6887 rtx tmp = gen_reg_rtx (V2SFmode);
6888 rtx tmp1, max, min;
6890 gcc_assert (TARGET_PAIRED_FLOAT);
6891 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6893 switch (rcode)
6895 case LT:
6896 case LTU:
6897 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6898 return;
6899 case GE:
6900 case GEU:
6901 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6902 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6903 return;
6904 case LE:
6905 case LEU:
6906 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6907 return;
6908 case GT:
6909 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6910 return;
6911 case EQ:
6912 tmp1 = gen_reg_rtx (V2SFmode);
6913 max = gen_reg_rtx (V2SFmode);
6914 min = gen_reg_rtx (V2SFmode);
6915 gen_reg_rtx (V2SFmode);
6917 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6918 emit_insn (gen_selv2sf4
6919 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6920 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6921 emit_insn (gen_selv2sf4
6922 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6923 emit_insn (gen_subv2sf3 (tmp1, min, max));
6924 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6925 return;
6926 case NE:
6927 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6928 return;
6929 case UNLE:
6930 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6931 return;
6932 case UNLT:
6933 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6934 return;
6935 case UNGE:
6936 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6937 return;
6938 case UNGT:
6939 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6940 return;
6941 default:
6942 gcc_unreachable ();
6945 return;
6948 /* Emit vector conditional expression.
6949 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6950 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6952 int
6953 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6954 rtx cond, rtx cc_op0, rtx cc_op1)
6956 enum rtx_code rcode = GET_CODE (cond);
6958 if (!TARGET_PAIRED_FLOAT)
6959 return 0;
6961 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6963 return 1;
6966 /* Initialize vector TARGET to VALS. */
6968 void
6969 rs6000_expand_vector_init (rtx target, rtx vals)
6971 machine_mode mode = GET_MODE (target);
6972 machine_mode inner_mode = GET_MODE_INNER (mode);
6973 int n_elts = GET_MODE_NUNITS (mode);
6974 int n_var = 0, one_var = -1;
6975 bool all_same = true, all_const_zero = true;
6976 rtx x, mem;
6977 int i;
6979 for (i = 0; i < n_elts; ++i)
6981 x = XVECEXP (vals, 0, i);
6982 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6983 ++n_var, one_var = i;
6984 else if (x != CONST0_RTX (inner_mode))
6985 all_const_zero = false;
6987 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6988 all_same = false;
6991 if (n_var == 0)
6993 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6994 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6995 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6997 /* Zero register. */
6998 emit_move_insn (target, CONST0_RTX (mode));
6999 return;
7001 else if (int_vector_p && easy_vector_constant (const_vec, mode))
7003 /* Splat immediate. */
7004 emit_insn (gen_rtx_SET (target, const_vec));
7005 return;
7007 else
7009 /* Load from constant pool. */
7010 emit_move_insn (target, const_vec);
7011 return;
7015 /* Double word values on VSX can use xxpermdi or lxvdsx. */
7016 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
7018 rtx op[2];
7019 size_t i;
7020 size_t num_elements = all_same ? 1 : 2;
7021 for (i = 0; i < num_elements; i++)
7023 op[i] = XVECEXP (vals, 0, i);
7024 /* Just in case there is a SUBREG with a smaller mode, do a
7025 conversion. */
7026 if (GET_MODE (op[i]) != inner_mode)
7028 rtx tmp = gen_reg_rtx (inner_mode);
7029 convert_move (tmp, op[i], 0);
7030 op[i] = tmp;
7032 /* Allow load with splat double word. */
7033 else if (MEM_P (op[i]))
7035 if (!all_same)
7036 op[i] = force_reg (inner_mode, op[i]);
7038 else if (!REG_P (op[i]))
7039 op[i] = force_reg (inner_mode, op[i]);
7042 if (all_same)
7044 if (mode == V2DFmode)
7045 emit_insn (gen_vsx_splat_v2df (target, op[0]));
7046 else
7047 emit_insn (gen_vsx_splat_v2di (target, op[0]));
7049 else
7051 if (mode == V2DFmode)
7052 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
7053 else
7054 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
7056 return;
7059 /* Special case initializing vector int if we are on 64-bit systems with
7060 direct move or we have the ISA 3.0 instructions. */
7061 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
7062 && TARGET_DIRECT_MOVE_64BIT)
7064 if (all_same)
7066 rtx element0 = XVECEXP (vals, 0, 0);
7067 if (MEM_P (element0))
7068 element0 = rs6000_address_for_fpconvert (element0);
7069 else
7070 element0 = force_reg (SImode, element0);
7072 if (TARGET_P9_VECTOR)
7073 emit_insn (gen_vsx_splat_v4si (target, element0));
7074 else
7076 rtx tmp = gen_reg_rtx (DImode);
7077 emit_insn (gen_zero_extendsidi2 (tmp, element0));
7078 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
7080 return;
7082 else
7084 rtx elements[4];
7085 size_t i;
7087 for (i = 0; i < 4; i++)
7089 elements[i] = XVECEXP (vals, 0, i);
7090 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
7091 elements[i] = copy_to_mode_reg (SImode, elements[i]);
7094 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
7095 elements[2], elements[3]));
7096 return;
7100 /* With single precision floating point on VSX, note that internally single
7101 precision is actually represented as a double, and either make 2 V2DF
7102 vectors, and convert these vectors to single precision, or do one
7103 conversion, and splat the result to the other elements. */
7104 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
7106 if (all_same)
7108 rtx element0 = XVECEXP (vals, 0, 0);
7110 if (TARGET_P9_VECTOR)
7112 if (MEM_P (element0))
7113 element0 = rs6000_address_for_fpconvert (element0);
7115 emit_insn (gen_vsx_splat_v4sf (target, element0));
7118 else
7120 rtx freg = gen_reg_rtx (V4SFmode);
7121 rtx sreg = force_reg (SFmode, element0);
7122 rtx cvt = (TARGET_XSCVDPSPN
7123 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
7124 : gen_vsx_xscvdpsp_scalar (freg, sreg));
7126 emit_insn (cvt);
7127 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
7128 const0_rtx));
7131 else
7133 rtx dbl_even = gen_reg_rtx (V2DFmode);
7134 rtx dbl_odd = gen_reg_rtx (V2DFmode);
7135 rtx flt_even = gen_reg_rtx (V4SFmode);
7136 rtx flt_odd = gen_reg_rtx (V4SFmode);
7137 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
7138 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
7139 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
7140 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
7142 /* Use VMRGEW if we can instead of doing a permute. */
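/* In the VMRGEW case below, given inputs { a, b, c, d }: dbl_even holds
{ a, c } and dbl_odd holds { b, d }. XVCVDPSP leaves each converted
single in the even word of its doubleword, so merging the even-numbered
words of the two results reassembles { a, b, c, d }. */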
7143 if (TARGET_P8_VECTOR)
7145 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
7146 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
7147 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7148 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7149 if (BYTES_BIG_ENDIAN)
7150 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
7151 else
7152 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
7154 else
7156 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
7157 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
7158 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7159 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7160 rs6000_expand_extract_even (target, flt_even, flt_odd);
7163 return;
7166 /* Special case initializing vector short/char that are splats if we are on
7167 64-bit systems with direct move. */
7168 if (all_same && TARGET_DIRECT_MOVE_64BIT
7169 && (mode == V16QImode || mode == V8HImode))
7171 rtx op0 = XVECEXP (vals, 0, 0);
7172 rtx di_tmp = gen_reg_rtx (DImode);
7174 if (!REG_P (op0))
7175 op0 = force_reg (GET_MODE_INNER (mode), op0);
7177 if (mode == V16QImode)
7179 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
7180 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
7181 return;
7184 if (mode == V8HImode)
7186 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
7187 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
7188 return;
7192 /* Store value to stack temp. Load vector element. Splat. However, splat
7193 of 64-bit items is not supported on Altivec. */
7194 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
7196 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7197 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
7198 XVECEXP (vals, 0, 0));
7199 x = gen_rtx_UNSPEC (VOIDmode,
7200 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7201 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7202 gen_rtvec (2,
7203 gen_rtx_SET (target, mem),
7204 x)));
7205 x = gen_rtx_VEC_SELECT (inner_mode, target,
7206 gen_rtx_PARALLEL (VOIDmode,
7207 gen_rtvec (1, const0_rtx)));
7208 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
7209 return;
7212 /* One field is non-constant. Load constant then overwrite
7213 varying field. */
7214 if (n_var == 1)
7216 rtx copy = copy_rtx (vals);
7218 /* Load constant part of vector, substitute neighboring value for
7219 varying element. */
7220 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
7221 rs6000_expand_vector_init (target, copy);
7223 /* Insert variable. */
7224 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
7225 return;
7228 /* Construct the vector in memory one field at a time
7229 and load the whole vector. */
7230 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7231 for (i = 0; i < n_elts; i++)
7232 emit_move_insn (adjust_address_nv (mem, inner_mode,
7233 i * GET_MODE_SIZE (inner_mode)),
7234 XVECEXP (vals, 0, i));
7235 emit_move_insn (target, mem);
7238 /* Set field ELT of TARGET to VAL. */
7240 void
7241 rs6000_expand_vector_set (rtx target, rtx val, int elt)
7243 machine_mode mode = GET_MODE (target);
7244 machine_mode inner_mode = GET_MODE_INNER (mode);
7245 rtx reg = gen_reg_rtx (mode);
7246 rtx mask, mem, x;
7247 int width = GET_MODE_SIZE (inner_mode);
7248 int i;
7250 val = force_reg (GET_MODE (val), val);
7252 if (VECTOR_MEM_VSX_P (mode))
7254 rtx insn = NULL_RTX;
7255 rtx elt_rtx = GEN_INT (elt);
7257 if (mode == V2DFmode)
7258 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7260 else if (mode == V2DImode)
7261 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7263 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
7265 if (mode == V4SImode)
7266 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7267 else if (mode == V8HImode)
7268 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7269 else if (mode == V16QImode)
7270 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7271 else if (mode == V4SFmode)
7272 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
7275 if (insn)
7277 emit_insn (insn);
7278 return;
7282 /* Simplify setting single-element vectors like V1TImode. */
7283 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7285 emit_move_insn (target, gen_lowpart (mode, val));
7286 return;
7289 /* Load single variable value. */
7290 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7291 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7292 x = gen_rtx_UNSPEC (VOIDmode,
7293 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7294 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7295 gen_rtvec (2,
7296 gen_rtx_SET (reg, mem),
7297 x)));
7299 /* Linear sequence. */
7300 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7301 for (i = 0; i < 16; ++i)
7302 XVECEXP (mask, 0, i) = GEN_INT (i);
7304 /* Set permute mask to insert element into target. */
7305 for (i = 0; i < width; ++i)
7306 XVECEXP (mask, 0, elt*width + i)
7307 = GEN_INT (i + 0x10);
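/* For example, for V4SImode and ELT == 2 the selector becomes
{ 0,1,2,3, 4,5,6,7, 16,17,18,19, 12,13,14,15 }; selector bytes 0x10
and up name bytes of the second permute input, so element 2 is taken
from the loaded value and all other bytes from TARGET. */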
7308 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
7310 if (BYTES_BIG_ENDIAN)
7311 x = gen_rtx_UNSPEC (mode,
7312 gen_rtvec (3, target, reg,
7313 force_reg (V16QImode, x)),
7314 UNSPEC_VPERM);
7315 else
7317 if (TARGET_P9_VECTOR)
7318 x = gen_rtx_UNSPEC (mode,
7319 gen_rtvec (3, target, reg,
7320 force_reg (V16QImode, x)),
7321 UNSPEC_VPERMR);
7322 else
7324 /* Invert selector. We prefer to generate VNAND on P8 so
7325 that future fusion opportunities can kick in, but must
7326 generate VNOR elsewhere. */
7327 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7328 rtx iorx = (TARGET_P8_VECTOR
7329 ? gen_rtx_IOR (V16QImode, notx, notx)
7330 : gen_rtx_AND (V16QImode, notx, notx));
7331 rtx tmp = gen_reg_rtx (V16QImode);
7332 emit_insn (gen_rtx_SET (tmp, iorx));
7334 /* Permute with operands reversed and adjusted selector. */
7335 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7336 UNSPEC_VPERM);
7340 emit_insn (gen_rtx_SET (target, x));
7343 /* Extract field ELT from VEC into TARGET. */
7345 void
7346 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7348 machine_mode mode = GET_MODE (vec);
7349 machine_mode inner_mode = GET_MODE_INNER (mode);
7350 rtx mem;
7352 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7354 switch (mode)
7356 default:
7357 break;
7358 case E_V1TImode:
7359 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7360 emit_move_insn (target, gen_lowpart (TImode, vec));
7361 break;
7362 case E_V2DFmode:
7363 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7364 return;
7365 case E_V2DImode:
7366 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7367 return;
7368 case E_V4SFmode:
7369 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7370 return;
7371 case E_V16QImode:
7372 if (TARGET_DIRECT_MOVE_64BIT)
7374 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7375 return;
7377 else
7378 break;
7379 case E_V8HImode:
7380 if (TARGET_DIRECT_MOVE_64BIT)
7382 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7383 return;
7385 else
7386 break;
7387 case E_V4SImode:
7388 if (TARGET_DIRECT_MOVE_64BIT)
7390 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7391 return;
7393 break;
7396 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7397 && TARGET_DIRECT_MOVE_64BIT)
7399 if (GET_MODE (elt) != DImode)
7401 rtx tmp = gen_reg_rtx (DImode);
7402 convert_move (tmp, elt, 0);
7403 elt = tmp;
7405 else if (!REG_P (elt))
7406 elt = force_reg (DImode, elt);
7408 switch (mode)
7410 case E_V2DFmode:
7411 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7412 return;
7414 case E_V2DImode:
7415 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7416 return;
7418 case E_V4SFmode:
7419 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7420 return;
7422 case E_V4SImode:
7423 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7424 return;
7426 case E_V8HImode:
7427 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7428 return;
7430 case E_V16QImode:
7431 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7432 return;
7434 default:
7435 gcc_unreachable ();
7439 gcc_assert (CONST_INT_P (elt));
7441 /* Allocate mode-sized buffer. */
7442 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7444 emit_move_insn (mem, vec);
7446 /* Add offset to field within buffer matching vector element. */
7447 mem = adjust_address_nv (mem, inner_mode,
7448 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7450 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
7453 /* Helper function to return the register number of an RTX. */
7454 static inline int
7455 regno_or_subregno (rtx op)
7457 if (REG_P (op))
7458 return REGNO (op);
7459 else if (SUBREG_P (op))
7460 return subreg_regno (op);
7461 else
7462 gcc_unreachable ();
7465 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7466 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7467 temporary (BASE_TMP) to fixup the address. Return the new memory address
7468 that is valid for reads or writes to a given register (SCALAR_REG). */
7470 static rtx
7471 rs6000_adjust_vec_address (rtx scalar_reg,
7472 rtx mem,
7473 rtx element,
7474 rtx base_tmp,
7475 machine_mode scalar_mode)
7477 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7478 rtx addr = XEXP (mem, 0);
7479 rtx element_offset;
7480 rtx new_addr;
7481 bool valid_addr_p;
7483 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7484 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7486 /* Calculate what we need to add to the address to get the element
7487 address. */
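/* For a constant element this is simply ELEMENT * SCALAR_SIZE (e.g. 12
bytes for element 3 of a V4SImode vector); for a variable element it is
computed below as ELEMENT shifted left by log2 (SCALAR_SIZE). */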
7488 if (CONST_INT_P (element))
7489 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7490 else
7492 int byte_shift = exact_log2 (scalar_size);
7493 gcc_assert (byte_shift >= 0);
7495 if (byte_shift == 0)
7496 element_offset = element;
7498 else
7500 if (TARGET_POWERPC64)
7501 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7502 else
7503 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7505 element_offset = base_tmp;
7509 /* Create the new address pointing to the element within the vector. If we
7510 are adding 0, we don't have to change the address. */
7511 if (element_offset == const0_rtx)
7512 new_addr = addr;
7514 /* A simple indirect address can be converted into a reg + offset
7515 address. */
7516 else if (REG_P (addr) || SUBREG_P (addr))
7517 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7519 /* Optimize D-FORM addresses with a constant offset and a constant element
7520 number, folding the element offset into the address directly. */
7521 else if (GET_CODE (addr) == PLUS)
7523 rtx op0 = XEXP (addr, 0);
7524 rtx op1 = XEXP (addr, 1);
7525 rtx insn;
7527 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7528 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7530 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7531 rtx offset_rtx = GEN_INT (offset);
7533 if (IN_RANGE (offset, -32768, 32767)
7534 && (scalar_size < 8 || (offset & 0x3) == 0))
7535 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7536 else
7538 emit_move_insn (base_tmp, offset_rtx);
7539 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7542 else
7544 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7545 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7547 /* Note, ADDI requires the register being added to be a base
7548 register. If the register was R0, load it up into the temporary
7549 and do the add. */
7550 if (op1_reg_p
7551 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7553 insn = gen_add3_insn (base_tmp, op1, element_offset);
7554 gcc_assert (insn != NULL_RTX);
7555 emit_insn (insn);
7558 else if (ele_reg_p
7559 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7561 insn = gen_add3_insn (base_tmp, element_offset, op1);
7562 gcc_assert (insn != NULL_RTX);
7563 emit_insn (insn);
7566 else
7568 emit_move_insn (base_tmp, op1);
7569 emit_insn (gen_add2_insn (base_tmp, element_offset));
7572 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7576 else
7578 emit_move_insn (base_tmp, addr);
7579 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7582 /* If we have a PLUS, we need to see whether the particular register class
7583 allows for D-FORM or X-FORM addressing. */
7584 if (GET_CODE (new_addr) == PLUS)
7586 rtx op1 = XEXP (new_addr, 1);
7587 addr_mask_type addr_mask;
7588 int scalar_regno = regno_or_subregno (scalar_reg);
7590 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7591 if (INT_REGNO_P (scalar_regno))
7592 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7594 else if (FP_REGNO_P (scalar_regno))
7595 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7597 else if (ALTIVEC_REGNO_P (scalar_regno))
7598 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7600 else
7601 gcc_unreachable ();
7603 if (REG_P (op1) || SUBREG_P (op1))
7604 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7605 else
7606 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7609 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7610 valid_addr_p = true;
7612 else
7613 valid_addr_p = false;
7615 if (!valid_addr_p)
7617 emit_move_insn (base_tmp, new_addr);
7618 new_addr = base_tmp;
7621 return change_address (mem, scalar_mode, new_addr);
7624 /* Split a variable vec_extract operation into the component instructions. */
7626 void
7627 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7628 rtx tmp_altivec)
7630 machine_mode mode = GET_MODE (src);
7631 machine_mode scalar_mode = GET_MODE (dest);
7632 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7633 int byte_shift = exact_log2 (scalar_size);
7635 gcc_assert (byte_shift >= 0);
7637 /* If we are given a memory address, optimize to load just the element. We
7638 don't have to adjust the vector element number on little endian
7639 systems. */
7640 if (MEM_P (src))
7642 gcc_assert (REG_P (tmp_gpr));
7643 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7644 tmp_gpr, scalar_mode));
7645 return;
7648 else if (REG_P (src) || SUBREG_P (src))
7650 int bit_shift = byte_shift + 3;
7651 rtx element2;
7652 int dest_regno = regno_or_subregno (dest);
7653 int src_regno = regno_or_subregno (src);
7654 int element_regno = regno_or_subregno (element);
7656 gcc_assert (REG_P (tmp_gpr));
7658 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7659 a general purpose register. */
7660 if (TARGET_P9_VECTOR
7661 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7662 && INT_REGNO_P (dest_regno)
7663 && ALTIVEC_REGNO_P (src_regno)
7664 && INT_REGNO_P (element_regno))
7666 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7667 rtx element_si = gen_rtx_REG (SImode, element_regno);
7669 if (mode == V16QImode)
7670 emit_insn (VECTOR_ELT_ORDER_BIG
7671 ? gen_vextublx (dest_si, element_si, src)
7672 : gen_vextubrx (dest_si, element_si, src));
7674 else if (mode == V8HImode)
7676 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7677 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7678 emit_insn (VECTOR_ELT_ORDER_BIG
7679 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7680 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7684 else
7686 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7687 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7688 emit_insn (VECTOR_ELT_ORDER_BIG
7689 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7690 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7693 return;
7697 gcc_assert (REG_P (tmp_altivec));
7699 /* For little endian, adjust the element ordering. For V2DI/V2DF we can
7700 use an XOR; otherwise we need to subtract. The shift amount is chosen
7701 so that VSLO will shift the element into the upper position (adding 3
7702 converts a byte shift into a bit shift). */
7703 if (scalar_size == 8)
7705 if (!VECTOR_ELT_ORDER_BIG)
7707 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7708 element2 = tmp_gpr;
7710 else
7711 element2 = element;
7713 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7714 bit. */
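/* E.g. on little endian, element 0 was XORed to 1 above, and 1 << 6 == 64
is the bit offset that lets VSLO shift (big-endian) doubleword 1 into
the leftmost position. */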
7715 emit_insn (gen_rtx_SET (tmp_gpr,
7716 gen_rtx_AND (DImode,
7717 gen_rtx_ASHIFT (DImode,
7718 element2,
7719 GEN_INT (6)),
7720 GEN_INT (64))));
7722 else
7724 if (!VECTOR_ELT_ORDER_BIG)
7726 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7728 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7729 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7730 element2 = tmp_gpr;
7732 else
7733 element2 = element;
7735 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
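/* E.g. for V4SImode on little endian, element 1 maps to big-endian
element 3 - 1 == 2 above, and 2 << 5 == 64 is that element's bit offset
from the left end of the vector. */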
7738 /* Get the value into the lower byte of the Altivec register where VSLO
7739 expects it. */
7740 if (TARGET_P9_VECTOR)
7741 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7742 else if (can_create_pseudo_p ())
7743 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7744 else
7746 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7747 emit_move_insn (tmp_di, tmp_gpr);
7748 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7751 /* Do the VSLO to get the value into the final location. */
7752 switch (mode)
7754 case E_V2DFmode:
7755 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7756 return;
7758 case E_V2DImode:
7759 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7760 return;
7762 case E_V4SFmode:
7764 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7765 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7766 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7767 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7768 tmp_altivec));
7770 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7771 return;
7774 case E_V4SImode:
7775 case E_V8HImode:
7776 case E_V16QImode:
7778 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7779 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7780 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7781 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7782 tmp_altivec));
7783 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7784 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7785 GEN_INT (64 - (8 * scalar_size))));
7786 return;
7789 default:
7790 gcc_unreachable ();
7793 return;
7795 else
7796 gcc_unreachable ();
7799 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7800 two SImode values. */
7802 static void
7803 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
7805 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
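/* For example, SI1 == 0x11112222 and SI2 == 0x33334444 combine into the
DImode value 0x1111222233334444, with SI1 in the upper half. */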
7807 if (CONST_INT_P (si1) && CONST_INT_P (si2))
7809 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
7810 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
7812 emit_move_insn (dest, GEN_INT (const1 | const2));
7813 return;
7816 /* Put si1 into upper 32-bits of dest. */
7817 if (CONST_INT_P (si1))
7818 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
7819 else
7821 /* Generate RLDIC. */
7822 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
7823 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
7824 rtx mask_rtx = GEN_INT (mask_32bit << 32);
7825 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
7826 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
7827 emit_insn (gen_rtx_SET (dest, and_rtx));
7830 /* Put si2 into the temporary. */
7831 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
7832 if (CONST_INT_P (si2))
7833 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
7834 else
7835 emit_insn (gen_zero_extendsidi2 (tmp, si2));
7837 /* Combine the two parts. */
7838 emit_insn (gen_iordi3 (dest, dest, tmp));
7839 return;
7842 /* Split a V4SI initialization. */
7844 void
7845 rs6000_split_v4si_init (rtx operands[])
7847 rtx dest = operands[0];
7849 /* Destination is a GPR, build up the two DImode parts in place. */
7850 if (REG_P (dest) || SUBREG_P (dest))
7852 int d_regno = regno_or_subregno (dest);
7853 rtx scalar1 = operands[1];
7854 rtx scalar2 = operands[2];
7855 rtx scalar3 = operands[3];
7856 rtx scalar4 = operands[4];
7857 rtx tmp1 = operands[5];
7858 rtx tmp2 = operands[6];
7860 /* Even though we only need one temporary (plus the destination, which
7861 has an early clobber constraint), try to use two temporaries, one for
7862 each double word created. That way the 2nd insn scheduling pass can
7863 rearrange things so the two parts are done in parallel. */
7864 if (BYTES_BIG_ENDIAN)
7866 rtx di_lo = gen_rtx_REG (DImode, d_regno);
7867 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
7868 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
7869 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
7871 else
7873 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
7874 rtx di_hi = gen_rtx_REG (DImode, d_regno);
7875 gcc_assert (!VECTOR_ELT_ORDER_BIG);
7876 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
7877 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
7879 return;
7882 else
7883 gcc_unreachable ();
7886 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7887 selects whether the alignment is ABI-mandated, optional, or
7888 both ABI-mandated and optional alignment. */
7890 unsigned int
7891 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7893 if (how != align_opt)
7895 if (TREE_CODE (type) == VECTOR_TYPE)
7897 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type)))
7899 if (align < 64)
7900 align = 64;
7902 else if (align < 128)
7903 align = 128;
7907 if (how != align_abi)
7909 if (TREE_CODE (type) == ARRAY_TYPE
7910 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7912 if (align < BITS_PER_WORD)
7913 align = BITS_PER_WORD;
7917 return align;
7920 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7921 instructions simply ignore the low bits; VSX memory instructions
7922 are aligned to 4 or 8 bytes. */
7924 static bool
7925 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7927 return (STRICT_ALIGNMENT
7928 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7929 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7930 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7931 && (int) align < VECTOR_ALIGN (mode)))));
7934 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7936 bool
7937 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7939 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7941 if (computed != 128)
7943 static bool warned;
7944 if (!warned && warn_psabi)
7946 warned = true;
7947 inform (input_location,
7948 "the layout of aggregates containing vectors with"
7949 " %d-byte alignment has changed in GCC 5",
7950 computed / BITS_PER_UNIT);
7953 /* In current GCC there is no special case. */
7954 return false;
7957 return false;
7960 /* AIX increases natural record alignment to doubleword if the first
7961 field is an FP double while the FP fields remain word aligned. */
7963 unsigned int
7964 rs6000_special_round_type_align (tree type, unsigned int computed,
7965 unsigned int specified)
7967 unsigned int align = MAX (computed, specified);
7968 tree field = TYPE_FIELDS (type);
7970 /* Skip all non-field decls. */
7971 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7972 field = DECL_CHAIN (field);
7974 if (field != NULL && field != type)
7976 type = TREE_TYPE (field);
7977 while (TREE_CODE (type) == ARRAY_TYPE)
7978 type = TREE_TYPE (type);
7980 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7981 align = MAX (align, 64);
7984 return align;
7987 /* Darwin increases record alignment to the natural alignment of
7988 the first field. */
7990 unsigned int
7991 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7992 unsigned int specified)
7994 unsigned int align = MAX (computed, specified);
7996 if (TYPE_PACKED (type))
7997 return align;
7999 /* Find the first field, looking down into aggregates. */
8000 do {
8001 tree field = TYPE_FIELDS (type);
8002 /* Skip all non-field decls. */
8003 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
8004 field = DECL_CHAIN (field);
8005 if (! field)
8006 break;
8007 /* A packed field does not contribute any extra alignment. */
8008 if (DECL_PACKED (field))
8009 return align;
8010 type = TREE_TYPE (field);
8011 while (TREE_CODE (type) == ARRAY_TYPE)
8012 type = TREE_TYPE (type);
8013 } while (AGGREGATE_TYPE_P (type));
8015 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
8016 align = MAX (align, TYPE_ALIGN (type));
8018 return align;
8021 /* Return 1 for an operand in small memory on V.4/eabi. */
8023 int
8024 small_data_operand (rtx op ATTRIBUTE_UNUSED,
8025 machine_mode mode ATTRIBUTE_UNUSED)
8027 #if TARGET_ELF
8028 rtx sym_ref;
8030 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
8031 return 0;
8033 if (DEFAULT_ABI != ABI_V4)
8034 return 0;
8036 if (GET_CODE (op) == SYMBOL_REF)
8037 sym_ref = op;
8039 else if (GET_CODE (op) != CONST
8040 || GET_CODE (XEXP (op, 0)) != PLUS
8041 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
8042 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
8043 return 0;
8045 else
8047 rtx sum = XEXP (op, 0);
8048 HOST_WIDE_INT summand;
8050 /* We have to be careful here, because it is the referenced address
8051 that must be within 32k of _SDA_BASE_, not just the symbol. */
8052 summand = INTVAL (XEXP (sum, 1));
8053 if (summand < 0 || summand > g_switch_value)
8054 return 0;
8056 sym_ref = XEXP (sum, 0);
8059 return SYMBOL_REF_SMALL_P (sym_ref);
8060 #else
8061 return 0;
8062 #endif
8065 /* Return true if either operand is a general purpose register. */
8067 bool
8068 gpr_or_gpr_p (rtx op0, rtx op1)
8070 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
8071 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
8074 /* Return true if this is a move direct operation between GPR registers and
8075 floating point/VSX registers. */
8077 bool
8078 direct_move_p (rtx op0, rtx op1)
8080 int regno0, regno1;
8082 if (!REG_P (op0) || !REG_P (op1))
8083 return false;
8085 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
8086 return false;
8088 regno0 = REGNO (op0);
8089 regno1 = REGNO (op1);
8090 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
8091 return false;
8093 if (INT_REGNO_P (regno0))
8094 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
8096 else if (INT_REGNO_P (regno1))
8098 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
8099 return true;
8101 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
8102 return true;
8105 return false;
8108 /* Return true if the OFFSET is valid for the quad address instructions that
8109 use d-form (register + offset) addressing. */
8111 static inline bool
8112 quad_address_offset_p (HOST_WIDE_INT offset)
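/* E.g. an offset of 32752 (0x7ff0) is acceptable, but 32760 (0x7ff8) is
not: the DQ-form instructions require the low four bits to be zero. */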
8114 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
8117 /* Return true if the ADDR is an acceptable address for a quad memory
8118 operation of mode MODE (either LQ/STQ for general purpose registers, or
8119 LXV/STXV for vector registers under ISA 3.0). STRICT is true if the
8120 address must satisfy strict RTL checking, where only hard registers (or
8121 pseudos already assigned to hard registers) are valid base registers. */
8123 bool
8124 quad_address_p (rtx addr, machine_mode mode, bool strict)
8126 rtx op0, op1;
8128 if (GET_MODE_SIZE (mode) != 16)
8129 return false;
8131 if (legitimate_indirect_address_p (addr, strict))
8132 return true;
8134 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8135 return false;
8137 if (GET_CODE (addr) != PLUS)
8138 return false;
8140 op0 = XEXP (addr, 0);
8141 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8142 return false;
8144 op1 = XEXP (addr, 1);
8145 if (!CONST_INT_P (op1))
8146 return false;
8148 return quad_address_offset_p (INTVAL (op1));
8151 /* Return true if this is a load or store quad operation. This function does
8152 not handle the atomic quad memory instructions. */
8154 bool
8155 quad_load_store_p (rtx op0, rtx op1)
8157 bool ret;
8159 if (!TARGET_QUAD_MEMORY)
8160 ret = false;
8162 else if (REG_P (op0) && MEM_P (op1))
8163 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
8164 && quad_memory_operand (op1, GET_MODE (op1))
8165 && !reg_overlap_mentioned_p (op0, op1));
8167 else if (MEM_P (op0) && REG_P (op1))
8168 ret = (quad_memory_operand (op0, GET_MODE (op0))
8169 && quad_int_reg_operand (op1, GET_MODE (op1)));
8171 else
8172 ret = false;
8174 if (TARGET_DEBUG_ADDR)
8176 fprintf (stderr, "\n========== quad_load_store, return %s\n",
8177 ret ? "true" : "false");
8178 debug_rtx (gen_rtx_SET (op0, op1));
8181 return ret;
8184 /* Given an address, return a constant offset term if one exists. */
8186 static rtx
8187 address_offset (rtx op)
8189 if (GET_CODE (op) == PRE_INC
8190 || GET_CODE (op) == PRE_DEC)
8191 op = XEXP (op, 0);
8192 else if (GET_CODE (op) == PRE_MODIFY
8193 || GET_CODE (op) == LO_SUM)
8194 op = XEXP (op, 1);
8196 if (GET_CODE (op) == CONST)
8197 op = XEXP (op, 0);
8199 if (GET_CODE (op) == PLUS)
8200 op = XEXP (op, 1);
8202 if (CONST_INT_P (op))
8203 return op;
8205 return NULL_RTX;
8208 /* Return true if the MEM operand is a memory operand suitable for use
8209 with a (full width, possibly multiple) gpr load/store. On
8210 powerpc64 this means the offset must be divisible by 4.
8211 Implements 'Y' constraint.
8213 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8214 a constraint function we know the operand has satisfied a suitable
8215 memory predicate. Also accept some odd rtl generated by reload
8216 (see rs6000_legitimize_reload_address for various forms). It is
8217 important that reload rtl be accepted by appropriate constraints
8218 but not by the operand predicate.
8220 Offsetting a lo_sum should not be allowed, except where we know by
8221 alignment that a 32k boundary is not crossed, but see the ???
8222 comment in rs6000_legitimize_reload_address. Note that by
8223 "offsetting" here we mean a further offset to access parts of the
8224 MEM. It's fine to have a lo_sum where the inner address is offset
8225 from a sym, since the same sym+offset will appear in the high part
8226 of the address calculation. */
8228 bool
8229 mem_operand_gpr (rtx op, machine_mode mode)
8231 unsigned HOST_WIDE_INT offset;
8232 int extra;
8233 rtx addr = XEXP (op, 0);
8235 op = address_offset (addr);
8236 if (op == NULL_RTX)
8237 return true;
8239 offset = INTVAL (op);
8240 if (TARGET_POWERPC64 && (offset & 3) != 0)
8241 return false;
8243 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8244 if (extra < 0)
8245 extra = 0;
8247 if (GET_CODE (addr) == LO_SUM)
8248 /* For lo_sum addresses, we must allow any offset except one that
8249 causes a wrap, so test only the low 16 bits. */
8250 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
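/* E.g. a lo_sum offset of 0x18000 reduces to -0x8000, and 0x12345 to
0x2345; only the low 16 bits take part in the range check below. */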
8252 return offset + 0x8000 < 0x10000u - extra;
8255 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8256 enforce an offset divisible by 4 even for 32-bit. */
8258 bool
8259 mem_operand_ds_form (rtx op, machine_mode mode)
8261 unsigned HOST_WIDE_INT offset;
8262 int extra;
8263 rtx addr = XEXP (op, 0);
8265 if (!offsettable_address_p (false, mode, addr))
8266 return false;
8268 op = address_offset (addr);
8269 if (op == NULL_RTX)
8270 return true;
8272 offset = INTVAL (op);
8273 if ((offset & 3) != 0)
8274 return false;
8276 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8277 if (extra < 0)
8278 extra = 0;
8280 if (GET_CODE (addr) == LO_SUM)
8281 /* For lo_sum addresses, we must allow any offset except one that
8282 causes a wrap, so test only the low 16 bits. */
8283 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8285 return offset + 0x8000 < 0x10000u - extra;
8288 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8290 static bool
8291 reg_offset_addressing_ok_p (machine_mode mode)
8293 switch (mode)
8295 case E_V16QImode:
8296 case E_V8HImode:
8297 case E_V4SFmode:
8298 case E_V4SImode:
8299 case E_V2DFmode:
8300 case E_V2DImode:
8301 case E_V1TImode:
8302 case E_TImode:
8303 case E_TFmode:
8304 case E_KFmode:
8305 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8306 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8307 a vector mode, if we want to use the VSX registers to move it around,
8308 we need to restrict ourselves to reg+reg addressing. Similarly for
8309 IEEE 128-bit floating point that is passed in a single vector
8310 register. */
8311 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8312 return mode_supports_vsx_dform_quad (mode);
8313 break;
8315 case E_V2SImode:
8316 case E_V2SFmode:
8317 /* Paired vector modes. Only reg+reg addressing is valid. */
8318 if (TARGET_PAIRED_FLOAT)
8319 return false;
8320 break;
8322 case E_SDmode:
8323 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8324 addressing for the LFIWZX and STFIWX instructions. */
8325 if (TARGET_NO_SDMODE_STACK)
8326 return false;
8327 break;
8329 default:
8330 break;
8333 return true;
8336 static bool
8337 virtual_stack_registers_memory_p (rtx op)
8339 int regnum;
8341 if (GET_CODE (op) == REG)
8342 regnum = REGNO (op);
8344 else if (GET_CODE (op) == PLUS
8345 && GET_CODE (XEXP (op, 0)) == REG
8346 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8347 regnum = REGNO (XEXP (op, 0));
8349 else
8350 return false;
8352 return (regnum >= FIRST_VIRTUAL_REGISTER
8353 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8356 /* Return true if a MODE-sized memory access to OP plus OFFSET
8357 is known to not straddle a 32k boundary. This function is used
8358 to determine whether -mcmodel=medium code can use TOC pointer
8359 relative addressing for OP. This means the alignment of the TOC
8360 pointer must also be taken into account, and unfortunately that is
8361 only 8 bytes. */
8363 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8364 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8365 #endif
8367 static bool
8368 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8369 machine_mode mode)
8371 tree decl;
8372 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8374 if (GET_CODE (op) != SYMBOL_REF)
8375 return false;
8377 /* ISA 3.0 vector d-form addressing is restricted, don't allow
8378 SYMBOL_REF. */
8379 if (mode_supports_vsx_dform_quad (mode))
8380 return false;
8382 dsize = GET_MODE_SIZE (mode);
8383 decl = SYMBOL_REF_DECL (op);
8384 if (!decl)
8386 if (dsize == 0)
8387 return false;
8389 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8390 replacing memory addresses with an anchor plus offset. We
8391 could find the decl by rummaging around in the block->objects
8392 VEC for the given offset but that seems like too much work. */
8393 dalign = BITS_PER_UNIT;
8394 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8395 && SYMBOL_REF_ANCHOR_P (op)
8396 && SYMBOL_REF_BLOCK (op) != NULL)
8398 struct object_block *block = SYMBOL_REF_BLOCK (op);
8400 dalign = block->alignment;
8401 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8403 else if (CONSTANT_POOL_ADDRESS_P (op))
8405 /* It would be nice to have get_pool_align()... */
8406 machine_mode cmode = get_pool_mode (op);
8408 dalign = GET_MODE_ALIGNMENT (cmode);
8411 else if (DECL_P (decl))
8413 dalign = DECL_ALIGN (decl);
8415 if (dsize == 0)
8417 /* Allow BLKmode when the entire object is known to not
8418 cross a 32k boundary. */
8419 if (!DECL_SIZE_UNIT (decl))
8420 return false;
8422 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8423 return false;
8425 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8426 if (dsize > 32768)
8427 return false;
8429 dalign /= BITS_PER_UNIT;
8430 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8431 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8432 return dalign >= dsize;
8435 else
8436 gcc_unreachable ();
8438 /* Find how many bits of the alignment we know for this access. */
8439 dalign /= BITS_PER_UNIT;
8440 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8441 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8442 mask = dalign - 1;
8443 lsb = offset & -offset;
8444 mask &= lsb - 1;
8445 dalign = mask + 1;
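/* E.g. with dalign == 8 but offset == 20, the lowest set bit of the
offset is 4, so only 4-byte alignment is guaranteed for the access. */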
8447 return dalign >= dsize;
8450 static bool
8451 constant_pool_expr_p (rtx op)
8453 rtx base, offset;
8455 split_const (op, &base, &offset);
8456 return (GET_CODE (base) == SYMBOL_REF
8457 && CONSTANT_POOL_ADDRESS_P (base)
8458 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8461 /* These are only used to pass through from print_operand/print_operand_address
8462 to rs6000_output_addr_const_extra over the intervening function
8463 output_addr_const which is not target code. */
8464 static const_rtx tocrel_base_oac, tocrel_offset_oac;
8466 /* Return true if OP is a toc pointer relative address (the output
8467 of create_TOC_reference). If STRICT, do not match non-split
8468 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8469 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8470 TOCREL_OFFSET_RET respectively. */
8472 bool
8473 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
8474 const_rtx *tocrel_offset_ret)
8476 if (!TARGET_TOC)
8477 return false;
8479 if (TARGET_CMODEL != CMODEL_SMALL)
8481 /* When strict, ensure we have everything tidy. */
8482 if (strict
8483 && !(GET_CODE (op) == LO_SUM
8484 && REG_P (XEXP (op, 0))
8485 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8486 return false;
8488 /* When not strict, allow non-split TOC addresses and also allow
8489 (lo_sum (high ..)) TOC addresses created during reload. */
8490 if (GET_CODE (op) == LO_SUM)
8491 op = XEXP (op, 1);
8494 const_rtx tocrel_base = op;
8495 const_rtx tocrel_offset = const0_rtx;
8497 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8499 tocrel_base = XEXP (op, 0);
8500 tocrel_offset = XEXP (op, 1);
8503 if (tocrel_base_ret)
8504 *tocrel_base_ret = tocrel_base;
8505 if (tocrel_offset_ret)
8506 *tocrel_offset_ret = tocrel_offset;
8508 return (GET_CODE (tocrel_base) == UNSPEC
8509 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
8512 /* Return true if X is a constant pool address, and also for cmodel=medium
8513 if X is a toc-relative address known to be offsettable within MODE. */
8515 bool
8516 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8517 bool strict)
8519 const_rtx tocrel_base, tocrel_offset;
8520 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
8521 && (TARGET_CMODEL != CMODEL_MEDIUM
8522 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8523 || mode == QImode
8524 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8525 INTVAL (tocrel_offset), mode)));
8528 static bool
8529 legitimate_small_data_p (machine_mode mode, rtx x)
8531 return (DEFAULT_ABI == ABI_V4
8532 && !flag_pic && !TARGET_TOC
8533 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8534 && small_data_operand (x, mode));
8537 bool
8538 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8539 bool strict, bool worst_case)
8541 unsigned HOST_WIDE_INT offset;
8542 unsigned int extra;
8544 if (GET_CODE (x) != PLUS)
8545 return false;
8546 if (!REG_P (XEXP (x, 0)))
8547 return false;
8548 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8549 return false;
8550 if (mode_supports_vsx_dform_quad (mode))
8551 return quad_address_p (x, mode, strict);
8552 if (!reg_offset_addressing_ok_p (mode))
8553 return virtual_stack_registers_memory_p (x);
8554 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8555 return true;
8556 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8557 return false;
8559 offset = INTVAL (XEXP (x, 1));
8560 extra = 0;
8561 switch (mode)
8563 case E_V2SImode:
8564 case E_V2SFmode:
8565 /* Paired single modes: offset addressing isn't valid. */
8566 return false;
8568 case E_DFmode:
8569 case E_DDmode:
8570 case E_DImode:
8571 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8572 addressing. */
8573 if (VECTOR_MEM_VSX_P (mode))
8574 return false;
8576 if (!worst_case)
8577 break;
8578 if (!TARGET_POWERPC64)
8579 extra = 4;
8580 else if (offset & 3)
8581 return false;
8582 break;
8584 case E_TFmode:
8585 case E_IFmode:
8586 case E_KFmode:
8587 case E_TDmode:
8588 case E_TImode:
8589 case E_PTImode:
8590 extra = 8;
8591 if (!worst_case)
8592 break;
8593 if (!TARGET_POWERPC64)
8594 extra = 12;
8595 else if (offset & 3)
8596 return false;
8597 break;
8599 default:
8600 break;
8603 offset += 0x8000;
8604 return offset < 0x10000 - extra;
8607 bool
8608 legitimate_indexed_address_p (rtx x, int strict)
8610 rtx op0, op1;
8612 if (GET_CODE (x) != PLUS)
8613 return false;
8615 op0 = XEXP (x, 0);
8616 op1 = XEXP (x, 1);
8618 return (REG_P (op0) && REG_P (op1)
8619 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8620 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8621 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8622 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8625 bool
8626 avoiding_indexed_address_p (machine_mode mode)
8628 /* Avoid indexed addressing for modes that have non-indexed
8629 load/store instruction forms. */
8630 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8633 bool
8634 legitimate_indirect_address_p (rtx x, int strict)
8636 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8639 bool
8640 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8642 if (!TARGET_MACHO || !flag_pic
8643 || mode != SImode || GET_CODE (x) != MEM)
8644 return false;
8645 x = XEXP (x, 0);
8647 if (GET_CODE (x) != LO_SUM)
8648 return false;
8649 if (GET_CODE (XEXP (x, 0)) != REG)
8650 return false;
8651 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8652 return false;
8653 x = XEXP (x, 1);
8655 return CONSTANT_P (x);
8658 static bool
8659 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8661 if (GET_CODE (x) != LO_SUM)
8662 return false;
8663 if (GET_CODE (XEXP (x, 0)) != REG)
8664 return false;
8665 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8666 return false;
8667 /* Quad-word addresses are restricted, and we can't use LO_SUM. */
8668 if (mode_supports_vsx_dform_quad (mode))
8669 return false;
8670 x = XEXP (x, 1);
8672 if (TARGET_ELF || TARGET_MACHO)
8674 bool large_toc_ok;
8676 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8677 return false;
8678 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8679 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8680 recognizes some LO_SUM addresses as valid although this
8681 function says the opposite. In most cases LRA, through different
8682 transformations, can generate correct code for address reloads;
8683 it fails to manage only some LO_SUM cases. So we need to add
8684 code analogous to that in rs6000_legitimize_reload_address for
8685 LO_SUM here, saying that some addresses are still valid. */
8686 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8687 && small_toc_ref (x, VOIDmode));
8688 if (TARGET_TOC && ! large_toc_ok)
8689 return false;
8690 if (GET_MODE_NUNITS (mode) != 1)
8691 return false;
8692 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8693 && !(/* ??? Assume floating point reg based on mode? */
8694 TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
8695 && (mode == DFmode || mode == DDmode)))
8696 return false;
8698 return CONSTANT_P (x) || large_toc_ok;
8701 return false;
8705 /* Try machine-dependent ways of modifying an illegitimate address
8706 to be legitimate. If we find one, return the new, valid address.
8707 This is used from only one place: `memory_address' in explow.c.
8709 OLDX is the address as it was before break_out_memory_refs was
8710 called. In some cases it is useful to look at this to decide what
8711 needs to be done.
8713 It is always safe for this function to do nothing. It exists to
8714 recognize opportunities to optimize the output.
8716 On RS/6000, first check for the sum of a register with a constant
8717 integer that is out of range. If so, generate code to add the
8718 constant with the low-order 16 bits masked to the register and force
8719 this result into another register (this can be done with `cau').
8720 Then generate an address of REG+(CONST&0xffff), allowing for the
8721 possibility of bit 16 being a one.
8723 Then check for the sum of a register and something not constant, try to
8724 load the other things into a register and return the sum. */
8726 static rtx
8727 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8728 machine_mode mode)
8730 unsigned int extra;
8732 if (!reg_offset_addressing_ok_p (mode)
8733 || mode_supports_vsx_dform_quad (mode))
8735 if (virtual_stack_registers_memory_p (x))
8736 return x;
8738 /* In theory we should not be seeing addresses of the form reg+0,
8739 but just in case it is generated, optimize it away. */
8740 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8741 return force_reg (Pmode, XEXP (x, 0));
8743 /* For TImode with load/store quad, restrict addresses to just a single
8744 pointer, so it works with both GPRs and VSX registers. */
8745 /* Make sure both operands are registers. */
8746 else if (GET_CODE (x) == PLUS
8747 && (mode != TImode || !TARGET_VSX))
8748 return gen_rtx_PLUS (Pmode,
8749 force_reg (Pmode, XEXP (x, 0)),
8750 force_reg (Pmode, XEXP (x, 1)));
8751 else
8752 return force_reg (Pmode, x);
8754 if (GET_CODE (x) == SYMBOL_REF)
8756 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8757 if (model != 0)
8758 return rs6000_legitimize_tls_address (x, model);
8761 extra = 0;
8762 switch (mode)
8764 case E_TFmode:
8765 case E_TDmode:
8766 case E_TImode:
8767 case E_PTImode:
8768 case E_IFmode:
8769 case E_KFmode:
8770 /* As in legitimate_offset_address_p we do not assume
8771 worst-case. The mode here is just a hint as to the registers
8772 used. A TImode is usually in gprs, but may actually be in
8773 fprs. Leave worst-case scenario for reload to handle via
8774 insn constraints. PTImode is only GPRs. */
8775 extra = 8;
8776 break;
8777 default:
8778 break;
8781 if (GET_CODE (x) == PLUS
8782 && GET_CODE (XEXP (x, 0)) == REG
8783 && GET_CODE (XEXP (x, 1)) == CONST_INT
8784 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8785 >= 0x10000 - extra)
8786 && !PAIRED_VECTOR_MODE (mode))
8788 HOST_WIDE_INT high_int, low_int;
8789 rtx sum;
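/* E.g. reg + 0x12345 is split into (reg + 0x10000) + 0x2345, and
reg + 0x18000 into (reg + 0x20000) + -0x8000; both parts then fit in
signed 16-bit immediates (an addis plus a D-form displacement). */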
8790 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8791 if (low_int >= 0x8000 - extra)
8792 low_int = 0;
8793 high_int = INTVAL (XEXP (x, 1)) - low_int;
8794 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8795 GEN_INT (high_int)), 0);
8796 return plus_constant (Pmode, sum, low_int);
8798 else if (GET_CODE (x) == PLUS
8799 && GET_CODE (XEXP (x, 0)) == REG
8800 && GET_CODE (XEXP (x, 1)) != CONST_INT
8801 && GET_MODE_NUNITS (mode) == 1
8802 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8803 || (/* ??? Assume floating point reg based on mode? */
8804 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8805 && (mode == DFmode || mode == DDmode)))
8806 && !avoiding_indexed_address_p (mode))
8808 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8809 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8811 else if (PAIRED_VECTOR_MODE (mode))
8813 if (mode == DImode)
8814 return x;
8815 /* We accept [reg + reg]. */
8817 if (GET_CODE (x) == PLUS)
8819 rtx op1 = XEXP (x, 0);
8820 rtx op2 = XEXP (x, 1);
8821 rtx y;
8823 op1 = force_reg (Pmode, op1);
8824 op2 = force_reg (Pmode, op2);
8826 /* We can't always do [reg + reg] for these, because [reg +
8827 reg + offset] is not a legitimate addressing mode. */
8828 y = gen_rtx_PLUS (Pmode, op1, op2);
8830 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
8831 return force_reg (Pmode, y);
8832 else
8833 return y;
8836 return force_reg (Pmode, x);
8838 else if ((TARGET_ELF
8839 #if TARGET_MACHO
8840 || !MACHO_DYNAMIC_NO_PIC_P
8841 #endif
8842 )
8843 && TARGET_32BIT
8844 && TARGET_NO_TOC
8845 && ! flag_pic
8846 && GET_CODE (x) != CONST_INT
8847 && GET_CODE (x) != CONST_WIDE_INT
8848 && GET_CODE (x) != CONST_DOUBLE
8849 && CONSTANT_P (x)
8850 && GET_MODE_NUNITS (mode) == 1
8851 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8852 || (/* ??? Assume floating point reg based on mode? */
8853 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
8854 && (mode == DFmode || mode == DDmode))))
8856 rtx reg = gen_reg_rtx (Pmode);
8857 if (TARGET_ELF)
8858 emit_insn (gen_elf_high (reg, x));
8859 else
8860 emit_insn (gen_macho_high (reg, x));
8861 return gen_rtx_LO_SUM (Pmode, reg, x);
8863 else if (TARGET_TOC
8864 && GET_CODE (x) == SYMBOL_REF
8865 && constant_pool_expr_p (x)
8866 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8867 return create_TOC_reference (x, NULL_RTX);
8868 else
8869 return x;
8872 /* Debug version of rs6000_legitimize_address. */
8873 static rtx
8874 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8876 rtx ret;
8877 rtx_insn *insns;
8879 start_sequence ();
8880 ret = rs6000_legitimize_address (x, oldx, mode);
8881 insns = get_insns ();
8882 end_sequence ();
8884 if (ret != x)
8886 fprintf (stderr,
8887 "\nrs6000_legitimize_address: mode %s, old code %s, "
8888 "new code %s, modified\n",
8889 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8890 GET_RTX_NAME (GET_CODE (ret)));
8892 fprintf (stderr, "Original address:\n");
8893 debug_rtx (x);
8895 fprintf (stderr, "oldx:\n");
8896 debug_rtx (oldx);
8898 fprintf (stderr, "New address:\n");
8899 debug_rtx (ret);
8901 if (insns)
8903 fprintf (stderr, "Insns added:\n");
8904 debug_rtx_list (insns, 20);
8907 else
8909 fprintf (stderr,
8910 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8911 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8913 debug_rtx (x);
8916 if (insns)
8917 emit_insn (insns);
8919 return ret;
8922 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8923 We need to emit DTP-relative relocations. */
8925 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8926 static void
8927 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8929 switch (size)
8931 case 4:
8932 fputs ("\t.long\t", file);
8933 break;
8934 case 8:
8935 fputs (DOUBLE_INT_ASM_OP, file);
8936 break;
8937 default:
8938 gcc_unreachable ();
8940 output_addr_const (file, x);
8941 if (TARGET_ELF)
8942 fputs ("@dtprel+0x8000", file);
8943 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8945 switch (SYMBOL_REF_TLS_MODEL (x))
8947 case 0:
8948 break;
8949 case TLS_MODEL_LOCAL_EXEC:
8950 fputs ("@le", file);
8951 break;
8952 case TLS_MODEL_INITIAL_EXEC:
8953 fputs ("@ie", file);
8954 break;
8955 case TLS_MODEL_GLOBAL_DYNAMIC:
8956 case TLS_MODEL_LOCAL_DYNAMIC:
8957 fputs ("@m", file);
8958 break;
8959 default:
8960 gcc_unreachable ();
8965 /* Return true if X is a symbol that refers to real (rather than emulated)
8966 TLS. */
8968 static bool
8969 rs6000_real_tls_symbol_ref_p (rtx x)
8971 return (GET_CODE (x) == SYMBOL_REF
8972 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8975 /* In the name of slightly smaller debug output, and to cater to
8976 general assembler lossage, recognize various UNSPEC sequences
8977 and turn them back into a direct symbol reference. */
8979 static rtx
8980 rs6000_delegitimize_address (rtx orig_x)
8982 rtx x, y, offset;
8984 orig_x = delegitimize_mem_from_attrs (orig_x);
8985 x = orig_x;
8986 if (MEM_P (x))
8987 x = XEXP (x, 0);
8989 y = x;
8990 if (TARGET_CMODEL != CMODEL_SMALL
8991 && GET_CODE (y) == LO_SUM)
8992 y = XEXP (y, 1);
8994 offset = NULL_RTX;
8995 if (GET_CODE (y) == PLUS
8996 && GET_MODE (y) == Pmode
8997 && CONST_INT_P (XEXP (y, 1)))
8999 offset = XEXP (y, 1);
9000 y = XEXP (y, 0);
9003 if (GET_CODE (y) == UNSPEC
9004 && XINT (y, 1) == UNSPEC_TOCREL)
9006 y = XVECEXP (y, 0, 0);
9008 #ifdef HAVE_AS_TLS
9009 /* Do not associate thread-local symbols with the original
9010 constant pool symbol. */
9011 if (TARGET_XCOFF
9012 && GET_CODE (y) == SYMBOL_REF
9013 && CONSTANT_POOL_ADDRESS_P (y)
9014 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
9015 return orig_x;
9016 #endif
9018 if (offset != NULL_RTX)
9019 y = gen_rtx_PLUS (Pmode, y, offset);
9020 if (!MEM_P (orig_x))
9021 return y;
9022 else
9023 return replace_equiv_address_nv (orig_x, y);
9026 if (TARGET_MACHO
9027 && GET_CODE (orig_x) == LO_SUM
9028 && GET_CODE (XEXP (orig_x, 1)) == CONST)
9030 y = XEXP (XEXP (orig_x, 1), 0);
9031 if (GET_CODE (y) == UNSPEC
9032 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
9033 return XVECEXP (y, 0, 0);
9036 return orig_x;
9039 /* Return true if X shouldn't be emitted into the debug info.
9040 The linker doesn't like .toc section references from
9041 .debug_* sections, so reject .toc section symbols. */
9043 static bool
9044 rs6000_const_not_ok_for_debug_p (rtx x)
9046 if (GET_CODE (x) == SYMBOL_REF
9047 && CONSTANT_POOL_ADDRESS_P (x))
9049 rtx c = get_pool_constant (x);
9050 machine_mode cmode = get_pool_mode (x);
9051 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
9052 return true;
9055 return false;
9059 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
9061 static bool
9062 rs6000_legitimate_combined_insn (rtx_insn *insn)
9064 int icode = INSN_CODE (insn);
9066 /* Reject creating doloop insns. Combine should not be allowed
9067 to create these for a number of reasons:
9068 1) In a nested loop, if combine creates one of these in an
9069 outer loop and the register allocator happens to allocate ctr
9070 to the outer loop insn, then the inner loop can't use ctr.
9071 Inner loops ought to be more highly optimized.
9072 2) Combine often wants to create one of these from what was
9073 originally a three insn sequence, first combining the three
9074 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
9075 allocated ctr, the splitter takes us back to the three insn
9076 sequence. It's better to stop combine at the two insn
9077 sequence.
9078 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
9079 insns, the register allocator sometimes uses floating point
9080 or vector registers for the pseudo. Since ctrsi/ctrdi is a
9081 jump insn and output reloads are not implemented for jumps,
9082 the ctrsi/ctrdi splitters need to handle all possible cases.
9083 That's a pain, and it gets to be seriously difficult when a
9084 splitter that runs after reload needs memory to transfer from
9085 a gpr to fpr. See PR70098 and PR71763 which are not fixed
9086 for the difficult case. It's better to not create problems
9087 in the first place. */
9088 if (icode != CODE_FOR_nothing
9089 && (icode == CODE_FOR_ctrsi_internal1
9090 || icode == CODE_FOR_ctrdi_internal1
9091 || icode == CODE_FOR_ctrsi_internal2
9092 || icode == CODE_FOR_ctrdi_internal2
9093 || icode == CODE_FOR_ctrsi_internal3
9094 || icode == CODE_FOR_ctrdi_internal3
9095 || icode == CODE_FOR_ctrsi_internal4
9096 || icode == CODE_FOR_ctrdi_internal4))
9097 return false;
9099 return true;
9102 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9104 static GTY(()) rtx rs6000_tls_symbol;
9105 static rtx
9106 rs6000_tls_get_addr (void)
9108 if (!rs6000_tls_symbol)
9109 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
9111 return rs6000_tls_symbol;
9114 /* Construct the SYMBOL_REF for TLS GOT references. */
9116 static GTY(()) rtx rs6000_got_symbol;
9117 static rtx
9118 rs6000_got_sym (void)
9120 if (!rs6000_got_symbol)
9122 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9123 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
9124 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
9127 return rs6000_got_symbol;
9130 /* AIX Thread-Local Address support. */
9132 static rtx
9133 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
9135 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
9136 const char *name;
9137 char *tlsname;
9139 name = XSTR (addr, 0);
9140 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
9141 or will be placed in the TLS private data section. */
9142 if (name[strlen (name) - 1] != ']'
9143 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
9144 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
9146 tlsname = XALLOCAVEC (char, strlen (name) + 5); /* name + "[UL]"/"[TL]" + nul. */
9147 strcpy (tlsname, name);
9148 strcat (tlsname,
9149 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
9150 tlsaddr = copy_rtx (addr);
9151 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
9153 else
9154 tlsaddr = addr;
9156 /* Place addr into TOC constant pool. */
9157 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
9159 /* Output the TOC entry and create the MEM referencing the value. */
9160 if (constant_pool_expr_p (XEXP (sym, 0))
9161 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
9163 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
9164 mem = gen_const_mem (Pmode, tocref);
9165 set_mem_alias_set (mem, get_TOC_alias_set ());
9167 else
9168 return sym;
9170 /* Use global-dynamic for local-dynamic. */
9171 if (model == TLS_MODEL_GLOBAL_DYNAMIC
9172 || model == TLS_MODEL_LOCAL_DYNAMIC)
9174 /* Create new TOC reference for @m symbol. */
9175 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
9176 tlsname = XALLOCAVEC (char, strlen (name) + 1);
9177 strcpy (tlsname, "*LCM");
9178 strcat (tlsname, name + 3);
9179 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
9180 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
9181 tocref = create_TOC_reference (modaddr, NULL_RTX);
9182 rtx modmem = gen_const_mem (Pmode, tocref);
9183 set_mem_alias_set (modmem, get_TOC_alias_set ());
9185 rtx modreg = gen_reg_rtx (Pmode);
9186 emit_insn (gen_rtx_SET (modreg, modmem));
9188 tmpreg = gen_reg_rtx (Pmode);
9189 emit_insn (gen_rtx_SET (tmpreg, mem));
9191 dest = gen_reg_rtx (Pmode);
9192 if (TARGET_32BIT)
9193 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
9194 else
9195 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
9196 return dest;
9198 /* Obtain TLS pointer: 32-bit call or 64-bit GPR 13. */
9199 else if (TARGET_32BIT)
9201 tlsreg = gen_reg_rtx (SImode);
9202 emit_insn (gen_tls_get_tpointer (tlsreg));
9204 else
9205 tlsreg = gen_rtx_REG (DImode, 13);
9207 /* Load the TOC value into temporary register. */
9208 tmpreg = gen_reg_rtx (Pmode);
9209 emit_insn (gen_rtx_SET (tmpreg, mem));
9210 set_unique_reg_note (get_last_insn (), REG_EQUAL,
9211 gen_rtx_MINUS (Pmode, addr, tlsreg));
9213 /* Add TOC symbol value to TLS pointer. */
9214 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
9216 return dest;
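/* An illustrative sketch (not part of the original source): on 64-bit
   AIX the non-dynamic path above amounts to loading the variable's
   offset from the thread pointer out of the TOC and adding GPR 13,
   roughly:

       ld   9,LCx(2)      # offset of x, via its TOC entry
       add  3,9,13        # r13 holds the 64-bit TLS pointer

   On 32-bit AIX the thread pointer is instead fetched at run time by
   the call emitted through gen_tls_get_tpointer above. */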
9219 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9220 this (thread-local) address. */
9222 static rtx
9223 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
9225 rtx dest, insn;
9227 if (TARGET_XCOFF)
9228 return rs6000_legitimize_tls_address_aix (addr, model);
9230 dest = gen_reg_rtx (Pmode);
9231 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
9233 rtx tlsreg;
9235 if (TARGET_64BIT)
9237 tlsreg = gen_rtx_REG (Pmode, 13);
9238 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
9240 else
9242 tlsreg = gen_rtx_REG (Pmode, 2);
9243 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
9245 emit_insn (insn);
9247 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
9249 rtx tlsreg, tmp;
9251 tmp = gen_reg_rtx (Pmode);
9252 if (TARGET_64BIT)
9254 tlsreg = gen_rtx_REG (Pmode, 13);
9255 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
9257 else
9259 tlsreg = gen_rtx_REG (Pmode, 2);
9260 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
9262 emit_insn (insn);
9263 if (TARGET_64BIT)
9264 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
9265 else
9266 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
9267 emit_insn (insn);
9269 else
9271 rtx r3, got, tga, tmp1, tmp2, call_insn;
9273 /* We currently use relocations like @got@tlsgd for tls, which
9274 means the linker will handle allocation of tls entries, placing
9275 them in the .got section. So use a pointer to the .got section,
9276 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9277 or to secondary GOT sections used by 32-bit -fPIC. */
9278 if (TARGET_64BIT)
9279 got = gen_rtx_REG (Pmode, 2);
9280 else
9282 if (flag_pic == 1)
9283 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
9284 else
9286 rtx gsym = rs6000_got_sym ();
9287 got = gen_reg_rtx (Pmode);
9288 if (flag_pic == 0)
9289 rs6000_emit_move (got, gsym, Pmode);
9290 else
9292 rtx mem, lab;
9294 tmp1 = gen_reg_rtx (Pmode);
9295 tmp2 = gen_reg_rtx (Pmode);
9296 mem = gen_const_mem (Pmode, tmp1);
9297 lab = gen_label_rtx ();
9298 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9299 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9300 if (TARGET_LINK_STACK)
9301 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9302 emit_move_insn (tmp2, mem);
9303 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9304 set_unique_reg_note (last, REG_EQUAL, gsym);
9309 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9311 tga = rs6000_tls_get_addr ();
9312 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9313 const0_rtx, Pmode);
9315 r3 = gen_rtx_REG (Pmode, 3);
9316 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9318 if (TARGET_64BIT)
9319 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9320 else
9321 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9323 else if (DEFAULT_ABI == ABI_V4)
9324 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9325 else
9326 gcc_unreachable ();
9327 call_insn = last_call_insn ();
9328 PATTERN (call_insn) = insn;
9329 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9330 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9331 pic_offset_table_rtx);
9333 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9335 tga = rs6000_tls_get_addr ();
9336 tmp1 = gen_reg_rtx (Pmode);
9337 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9338 const0_rtx, Pmode);
9340 r3 = gen_rtx_REG (Pmode, 3);
9341 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9343 if (TARGET_64BIT)
9344 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9345 else
9346 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9348 else if (DEFAULT_ABI == ABI_V4)
9349 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9350 else
9351 gcc_unreachable ();
9352 call_insn = last_call_insn ();
9353 PATTERN (call_insn) = insn;
9354 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9355 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9356 pic_offset_table_rtx);
9358 if (rs6000_tls_size == 16)
9360 if (TARGET_64BIT)
9361 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9362 else
9363 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9365 else if (rs6000_tls_size == 32)
9367 tmp2 = gen_reg_rtx (Pmode);
9368 if (TARGET_64BIT)
9369 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9370 else
9371 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9372 emit_insn (insn);
9373 if (TARGET_64BIT)
9374 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9375 else
9376 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9378 else
9380 tmp2 = gen_reg_rtx (Pmode);
9381 if (TARGET_64BIT)
9382 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9383 else
9384 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9385 emit_insn (insn);
9386 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9388 emit_insn (insn);
9390 else
9392 /* IE, or 64-bit offset LE. */
9393 tmp2 = gen_reg_rtx (Pmode);
9394 if (TARGET_64BIT)
9395 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9396 else
9397 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9398 emit_insn (insn);
9399 if (TARGET_64BIT)
9400 insn = gen_tls_tls_64 (dest, tmp2, addr);
9401 else
9402 insn = gen_tls_tls_32 (dest, tmp2, addr);
9403 emit_insn (insn);
9407 return dest;
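/* Illustrative 64-bit ELF sequences for the paths above (the real
   assembly comes from the tls_* patterns in rs6000.md), for a
   thread-local variable "x":

     local-exec, -mtls-size=16:  addi  3,13,x@tprel
     local-exec, -mtls-size=32:  addis 3,13,x@tprel@ha
                                 addi  3,3,x@tprel@l
     initial-exec:               ld    9,x@got@tprel(2)
                                 add   3,9,x@tls
     global-dynamic:             addi  3,2,x@got@tlsgd
                                 bl    __tls_get_addr(x@tlsgd)
                                 nop  */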
9410 /* Only create the global variable for the stack protect guard if we are using
9411 the global flavor of that guard. */
9412 static tree
9413 rs6000_init_stack_protect_guard (void)
9415 if (rs6000_stack_protector_guard == SSP_GLOBAL)
9416 return default_stack_protect_guard ();
9418 return NULL_TREE;
9421 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9423 static bool
9424 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9426 if (GET_CODE (x) == HIGH
9427 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9428 return true;
9430 /* A TLS symbol in the TOC cannot contain a sum. */
9431 if (GET_CODE (x) == CONST
9432 && GET_CODE (XEXP (x, 0)) == PLUS
9433 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9434 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9435 return true;
9437 /* Do not place an ELF TLS symbol in the constant pool. */
9438 return TARGET_ELF && tls_referenced_p (x);
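/* For example (a sketch, not from the original source): given
   "__thread int x;", an address such as

     (const:DI (plus:DI (symbol_ref:DI ("x")) (const_int 8)))

   must not be forced into the TOC, because the relocation on a TLS
   TOC entry cannot express the symbol-plus-addend sum. */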
9441 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9442 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9443 can be addressed relative to the toc pointer. */
9445 static bool
9446 use_toc_relative_ref (rtx sym, machine_mode mode)
9448 return ((constant_pool_expr_p (sym)
9449 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9450 get_pool_mode (sym)))
9451 || (TARGET_CMODEL == CMODEL_MEDIUM
9452 && SYMBOL_REF_LOCAL_P (sym)
9453 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
9456 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9457 replace the input X, or the original X if no replacement is called for.
9458 The output parameter *WIN is 1 if the calling macro should goto WIN,
9459 0 if it should not.
9461 For RS/6000, we wish to handle large displacements off a base
9462 register by splitting the addend across an addi/addis and the mem insn.
9463 This cuts the number of extra insns needed from 3 to 1.
9465 On Darwin, we use this to generate code for floating point constants.
9466 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9467 The Darwin code is inside #if TARGET_MACHO because only then are the
9468 machopic_* functions defined. */
9469 static rtx
9470 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9471 int opnum, int type,
9472 int ind_levels ATTRIBUTE_UNUSED, int *win)
9474 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9475 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9477 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9478 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9479 if (reg_offset_p
9480 && opnum == 1
9481 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9482 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9483 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9484 && TARGET_P9_VECTOR)
9485 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9486 && TARGET_P9_VECTOR)))
9487 reg_offset_p = false;
9489 /* We must recognize output that we have already generated ourselves. */
9490 if (GET_CODE (x) == PLUS
9491 && GET_CODE (XEXP (x, 0)) == PLUS
9492 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9493 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9494 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9496 if (TARGET_DEBUG_ADDR)
9498 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9499 debug_rtx (x);
9501 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9502 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9503 opnum, (enum reload_type) type);
9504 *win = 1;
9505 return x;
9508 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9509 if (GET_CODE (x) == LO_SUM
9510 && GET_CODE (XEXP (x, 0)) == HIGH)
9512 if (TARGET_DEBUG_ADDR)
9514 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9515 debug_rtx (x);
9517 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9518 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9519 opnum, (enum reload_type) type);
9520 *win = 1;
9521 return x;
9524 #if TARGET_MACHO
9525 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9526 && GET_CODE (x) == LO_SUM
9527 && GET_CODE (XEXP (x, 0)) == PLUS
9528 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9529 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9530 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9531 && machopic_operand_p (XEXP (x, 1)))
9533 /* Result of a previous invocation of this function on a Darwin
9534 floating-point constant. */
9535 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9536 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9537 opnum, (enum reload_type) type);
9538 *win = 1;
9539 return x;
9541 #endif
9543 if (TARGET_CMODEL != CMODEL_SMALL
9544 && reg_offset_p
9545 && !quad_offset_p
9546 && small_toc_ref (x, VOIDmode))
9548 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9549 x = gen_rtx_LO_SUM (Pmode, hi, x);
9550 if (TARGET_DEBUG_ADDR)
9552 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9553 debug_rtx (x);
9555 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9556 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9557 opnum, (enum reload_type) type);
9558 *win = 1;
9559 return x;
9562 if (GET_CODE (x) == PLUS
9563 && REG_P (XEXP (x, 0))
9564 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9565 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9566 && CONST_INT_P (XEXP (x, 1))
9567 && reg_offset_p
9568 && !PAIRED_VECTOR_MODE (mode)
9569 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9571 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9572 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9573 HOST_WIDE_INT high
9574 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9576 /* Check for 32-bit overflow or quad addresses with one of the
9577 four least significant bits set. */
9578 if (high + low != val
9579 || (quad_offset_p && (low & 0xf)))
9581 *win = 0;
9582 return x;
9585 /* Reload the high part into a base reg; leave the low part
9586 in the mem directly. */
9588 x = gen_rtx_PLUS (GET_MODE (x),
9589 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9590 GEN_INT (high)),
9591 GEN_INT (low));
9593 if (TARGET_DEBUG_ADDR)
9595 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9596 debug_rtx (x);
9598 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9599 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9600 opnum, (enum reload_type) type);
9601 *win = 1;
9602 return x;
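/* A worked example of the split above (illustrative): for val = 0x12008
   we get low = 0x2008 and high = 0x10000, so (reg + 0x12008) is reloaded
   as

       addis  rT,rB,0x1        ; rT = rB + 0x10000 (the reloaded part)
       ld     rD,0x2008(rT)    ; the low part stays in the mem insn

   Note that low is sign-extended: val = 0x9000 gives low = -0x7000 and
   high = 0x10000. */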
9605 if (GET_CODE (x) == SYMBOL_REF
9606 && reg_offset_p
9607 && !quad_offset_p
9608 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9609 && !PAIRED_VECTOR_MODE (mode)
9610 #if TARGET_MACHO
9611 && DEFAULT_ABI == ABI_DARWIN
9612 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9613 && machopic_symbol_defined_p (x)
9614 #else
9615 && DEFAULT_ABI == ABI_V4
9616 && !flag_pic
9617 #endif
9618 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9619 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9620 without fprs.
9621 ??? Assume floating point reg based on mode? This assumption is
9622 violated by e.g. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9623 where reload ends up doing a DFmode load of a constant from
9624 mem using two gprs. Unfortunately, at this point reload
9625 hasn't yet selected regs so poking around in reload data
9626 won't help and even if we could figure out the regs reliably,
9627 we'd still want to allow this transformation when the mem is
9628 naturally aligned. Since we say the address is good here, we
9629 can't disable offsets from LO_SUMs in mem_operand_gpr.
9630 FIXME: Allow offset from lo_sum for other modes too, when
9631 mem is sufficiently aligned.
9633 Also disallow this if the type can go in VMX/Altivec registers, since
9634 those registers do not have d-form (reg+offset) address modes. */
9635 && !reg_addr[mode].scalar_in_vmx_p
9636 && mode != TFmode
9637 && mode != TDmode
9638 && mode != IFmode
9639 && mode != KFmode
9640 && (mode != TImode || !TARGET_VSX)
9641 && mode != PTImode
9642 && (mode != DImode || TARGET_POWERPC64)
9643 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9644 || (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)))
9646 #if TARGET_MACHO
9647 if (flag_pic)
9649 rtx offset = machopic_gen_offset (x);
9650 x = gen_rtx_LO_SUM (GET_MODE (x),
9651 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9652 gen_rtx_HIGH (Pmode, offset)), offset);
9654 else
9655 #endif
9656 x = gen_rtx_LO_SUM (GET_MODE (x),
9657 gen_rtx_HIGH (Pmode, x), x);
9659 if (TARGET_DEBUG_ADDR)
9661 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9662 debug_rtx (x);
9664 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9665 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9666 opnum, (enum reload_type) type);
9667 *win = 1;
9668 return x;
9671 /* Reload an offset address wrapped by an AND that represents the
9672 masking of the lower bits. Strip the outer AND and let reload
9673 convert the offset address into an indirect address. For VSX,
9674 force reload to create the address with an AND in a separate
9675 register, because we can't guarantee an altivec register will
9676 be used. */
9677 if (VECTOR_MEM_ALTIVEC_P (mode)
9678 && GET_CODE (x) == AND
9679 && GET_CODE (XEXP (x, 0)) == PLUS
9680 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9681 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9682 && GET_CODE (XEXP (x, 1)) == CONST_INT
9683 && INTVAL (XEXP (x, 1)) == -16)
9685 x = XEXP (x, 0);
9686 *win = 1;
9687 return x;
9690 if (TARGET_TOC
9691 && reg_offset_p
9692 && !quad_offset_p
9693 && GET_CODE (x) == SYMBOL_REF
9694 && use_toc_relative_ref (x, mode))
9696 x = create_TOC_reference (x, NULL_RTX);
9697 if (TARGET_CMODEL != CMODEL_SMALL)
9699 if (TARGET_DEBUG_ADDR)
9701 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9702 debug_rtx (x);
9704 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9705 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9706 opnum, (enum reload_type) type);
9708 *win = 1;
9709 return x;
9711 *win = 0;
9712 return x;
9715 /* Debug version of rs6000_legitimize_reload_address. */
9716 static rtx
9717 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9718 int opnum, int type,
9719 int ind_levels, int *win)
9721 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9722 ind_levels, win);
9723 fprintf (stderr,
9724 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9725 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9726 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9727 debug_rtx (x);
9729 if (x == ret)
9730 fprintf (stderr, "Same address returned\n");
9731 else if (!ret)
9732 fprintf (stderr, "NULL returned\n");
9733 else
9735 fprintf (stderr, "New address:\n");
9736 debug_rtx (ret);
9739 return ret;
9742 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9743 that is a valid memory address for an instruction.
9744 The MODE argument is the machine mode for the MEM expression
9745 that wants to use this address.
9747 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9748 refers to a constant pool entry of an address (or the sum of it
9749 plus a constant), a short (16-bit signed) constant plus a register,
9750 the sum of two registers, or a register indirect, possibly with an
9751 auto-increment. For DFmode, DDmode and DImode with a constant plus
9752 register, we must ensure that both words are addressable, or on
9753 PowerPC64 that the offset is word aligned.
9755 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9756 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9757 because adjacent memory cells are accessed by adding word-sized offsets
9758 during assembly output. */
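/* Some concrete examples of the accepted forms (illustrative only):

     (mem:DI (reg:DI 3))                           register indirect
     (mem:DI (plus:DI (reg:DI 3) (const_int 16)))  D/DS-form offset
     (mem:DF (plus:DI (reg:DI 3) (reg:DI 4)))      X-form indexed
     (mem:SI (pre_inc:DI (reg:DI 3)))              update form

   DS-form insns such as ld/std require the offset to be a multiple
   of 4, and the ISA 3.0 DQ-form (lxv/stxv) a multiple of 16, which
   quad_address_p below enforces. */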
9759 static bool
9760 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9762 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9763 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9765 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9766 if (VECTOR_MEM_ALTIVEC_P (mode)
9767 && GET_CODE (x) == AND
9768 && GET_CODE (XEXP (x, 1)) == CONST_INT
9769 && INTVAL (XEXP (x, 1)) == -16)
9770 x = XEXP (x, 0);
9772 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9773 return 0;
9774 if (legitimate_indirect_address_p (x, reg_ok_strict))
9775 return 1;
9776 if (TARGET_UPDATE
9777 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9778 && mode_supports_pre_incdec_p (mode)
9779 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9780 return 1;
9781 /* Handle restricted vector d-form offsets in ISA 3.0. */
9782 if (quad_offset_p)
9784 if (quad_address_p (x, mode, reg_ok_strict))
9785 return 1;
9787 else if (virtual_stack_registers_memory_p (x))
9788 return 1;
9790 else if (reg_offset_p)
9792 if (legitimate_small_data_p (mode, x))
9793 return 1;
9794 if (legitimate_constant_pool_address_p (x, mode,
9795 reg_ok_strict || lra_in_progress))
9796 return 1;
9797 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
9798 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
9799 return 1;
9802 /* For TImode, if we have TImode in VSX registers, only allow register
9803 indirect addresses. This will allow the values to go in either GPRs
9804 or VSX registers without reloading. The vector types would tend to
9805 go into VSX registers, so we allow REG+REG, while TImode seems
9806 somewhat split, in that some uses are GPR based, and some VSX based. */
9807 /* FIXME: We could loosen this by changing the following to
9808 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9809 but currently we cannot allow REG+REG addressing for TImode. See
9810 PR72827 for complete details on how this ends up hoodwinking DSE. */
9811 if (mode == TImode && TARGET_VSX)
9812 return 0;
9813 /* If not REG_OK_STRICT (before reload), let any stack offset pass. */
9814 if (! reg_ok_strict
9815 && reg_offset_p
9816 && GET_CODE (x) == PLUS
9817 && GET_CODE (XEXP (x, 0)) == REG
9818 && (XEXP (x, 0) == virtual_stack_vars_rtx
9819 || XEXP (x, 0) == arg_pointer_rtx)
9820 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9821 return 1;
9822 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9823 return 1;
9824 if (!FLOAT128_2REG_P (mode)
9825 && ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
9826 || TARGET_POWERPC64
9827 || (mode != DFmode && mode != DDmode))
9828 && (TARGET_POWERPC64 || mode != DImode)
9829 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9830 && mode != PTImode
9831 && !avoiding_indexed_address_p (mode)
9832 && legitimate_indexed_address_p (x, reg_ok_strict))
9833 return 1;
9834 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9835 && mode_supports_pre_modify_p (mode)
9836 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9837 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9838 reg_ok_strict, false)
9839 || (!avoiding_indexed_address_p (mode)
9840 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9841 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9842 return 1;
9843 if (reg_offset_p && !quad_offset_p
9844 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9845 return 1;
9846 return 0;
9849 /* Debug version of rs6000_legitimate_address_p. */
9850 static bool
9851 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9852 bool reg_ok_strict)
9854 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9855 fprintf (stderr,
9856 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9857 "strict = %d, reload = %s, code = %s\n",
9858 ret ? "true" : "false",
9859 GET_MODE_NAME (mode),
9860 reg_ok_strict,
9861 (reload_completed ? "after" : "before"),
9862 GET_RTX_NAME (GET_CODE (x)));
9863 debug_rtx (x);
9865 return ret;
9868 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9870 static bool
9871 rs6000_mode_dependent_address_p (const_rtx addr,
9872 addr_space_t as ATTRIBUTE_UNUSED)
9874 return rs6000_mode_dependent_address_ptr (addr);
9877 /* Go to LABEL if ADDR (a legitimate address expression)
9878 has an effect that depends on the machine mode it is used for.
9880 On the RS/6000 this is true of all integral offsets (since AltiVec
9881 and VSX modes don't allow them) and of pre-increment and decrement addresses.
9883 ??? Except that due to conceptual problems in offsettable_address_p
9884 we can't really report the problems of integral offsets. So leave
9885 this assuming that the adjustable offset must be valid for the
9886 sub-words of a TFmode operand, which is what we had before. */
9888 static bool
9889 rs6000_mode_dependent_address (const_rtx addr)
9891 switch (GET_CODE (addr))
9893 case PLUS:
9894 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9895 is considered a legitimate address before reload, so there
9896 are no offset restrictions in that case. Note that this
9897 condition is safe in strict mode because any address involving
9898 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9899 been rejected as illegitimate. */
9900 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9901 && XEXP (addr, 0) != arg_pointer_rtx
9902 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9904 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9905 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9907 break;
9909 case LO_SUM:
9910 /* Anything in the constant pool is sufficiently aligned that
9911 all bytes have the same high part address. */
9912 return !legitimate_constant_pool_address_p (addr, QImode, false);
9914 /* Auto-increment cases are now treated generically in recog.c. */
9915 case PRE_MODIFY:
9916 return TARGET_UPDATE;
9918 /* AND is only allowed in Altivec loads. */
9919 case AND:
9920 return true;
9922 default:
9923 break;
9926 return false;
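/* Example for the PLUS case above (illustrative): in 32-bit mode a
   DFmode access at (plus (reg) (const_int 32764)) needs its second
   word at offset 32768, which no 16-bit D field can encode, so the
   address is mode-dependent. The bound leaves room for 8 bytes of
   sub-word accesses on PowerPC64 and 12 bytes on 32-bit targets. */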
9929 /* Debug version of rs6000_mode_dependent_address. */
9930 static bool
9931 rs6000_debug_mode_dependent_address (const_rtx addr)
9933 bool ret = rs6000_mode_dependent_address (addr);
9935 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9936 ret ? "true" : "false");
9937 debug_rtx (addr);
9939 return ret;
9942 /* Implement FIND_BASE_TERM. */
9945 rs6000_find_base_term (rtx op)
9947 rtx base;
9949 base = op;
9950 if (GET_CODE (base) == CONST)
9951 base = XEXP (base, 0);
9952 if (GET_CODE (base) == PLUS)
9953 base = XEXP (base, 0);
9954 if (GET_CODE (base) == UNSPEC)
9955 switch (XINT (base, 1))
9957 case UNSPEC_TOCREL:
9958 case UNSPEC_MACHOPIC_OFFSET:
9959 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9960 for aliasing purposes. */
9961 return XVECEXP (base, 0, 0);
9964 return op;
9967 /* More elaborate version of recog's offsettable_memref_p predicate
9968 that works around the ??? note of rs6000_mode_dependent_address.
9969 In particular it accepts
9971 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9973 in 32-bit mode, which the recog predicate rejects.
9975 static bool
9976 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
9978 bool worst_case;
9980 if (!MEM_P (op))
9981 return false;
9983 /* First mimic offsettable_memref_p. */
9984 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
9985 return true;
9987 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9988 the latter predicate knows nothing about the mode of the memory
9989 reference and, therefore, assumes that it is the largest supported
9990 mode (TFmode). As a consequence, legitimate offsettable memory
9991 references are rejected. rs6000_legitimate_offset_address_p contains
9992 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9993 at least with a little bit of help here given that we know the
9994 actual registers used. */
9995 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9996 || GET_MODE_SIZE (reg_mode) == 4);
9997 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9998 true, worst_case);
10001 /* Determine the reassociation width to be used in reassociate_bb.
10002 This takes into account how many parallel operations we
10003 can actually do of a given type, and also the latency.
10005 int add/sub 6/cycle
10006 mul 2/cycle
10007 vect add/sub/mul 2/cycle
10008 fp add/sub/mul 2/cycle
10009 dfp 1/cycle
10012 static int
10013 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
10014 machine_mode mode)
10016 switch (rs6000_cpu)
10018 case PROCESSOR_POWER8:
10019 case PROCESSOR_POWER9:
10020 if (DECIMAL_FLOAT_MODE_P (mode))
10021 return 1;
10022 if (VECTOR_MODE_P (mode))
10023 return 4;
10024 if (INTEGRAL_MODE_P (mode))
10025 return opc == MULT_EXPR ? 4 : 6;
10026 if (FLOAT_MODE_P (mode))
10027 return 4;
10028 break;
10029 default:
10030 break;
10032 return 1;
10035 /* Change register usage conditional on target flags. */
10036 static void
10037 rs6000_conditional_register_usage (void)
10039 int i;
10041 if (TARGET_DEBUG_TARGET)
10042 fprintf (stderr, "rs6000_conditional_register_usage called\n");
10044 /* Set MQ register fixed (already call_used) so that it will not be
10045 allocated. */
10046 fixed_regs[64] = 1;
10048 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
10049 if (TARGET_64BIT)
10050 fixed_regs[13] = call_used_regs[13]
10051 = call_really_used_regs[13] = 1;
10053 /* Conditionally disable FPRs. */
10054 if (TARGET_SOFT_FLOAT)
10055 for (i = 32; i < 64; i++)
10056 fixed_regs[i] = call_used_regs[i]
10057 = call_really_used_regs[i] = 1;
10059 /* The TOC register is not killed across calls in a way that is
10060 visible to the compiler. */
10061 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10062 call_really_used_regs[2] = 0;
10064 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
10065 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10067 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
10068 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10069 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10070 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10072 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
10073 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10074 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10075 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10077 if (TARGET_TOC && TARGET_MINIMAL_TOC)
10078 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10079 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10081 if (!TARGET_ALTIVEC && !TARGET_VSX)
10083 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
10084 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10085 call_really_used_regs[VRSAVE_REGNO] = 1;
10088 if (TARGET_ALTIVEC || TARGET_VSX)
10089 global_regs[VSCR_REGNO] = 1;
10091 if (TARGET_ALTIVEC_ABI)
10093 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
10094 call_used_regs[i] = call_really_used_regs[i] = 1;
10096 /* AIX reserves VR20:31 in non-extended ABI mode. */
10097 if (TARGET_XCOFF)
10098 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
10099 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10104 /* Output insns to set DEST equal to the constant SOURCE as a series of
10105 lis, ori and shl instructions and return TRUE. */
10107 bool
10108 rs6000_emit_set_const (rtx dest, rtx source)
10110 machine_mode mode = GET_MODE (dest);
10111 rtx temp, set;
10112 rtx_insn *insn;
10113 HOST_WIDE_INT c;
10115 gcc_checking_assert (CONST_INT_P (source));
10116 c = INTVAL (source);
10117 switch (mode)
10119 case E_QImode:
10120 case E_HImode:
10121 emit_insn (gen_rtx_SET (dest, source));
10122 return true;
10124 case E_SImode:
10125 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
10127 emit_insn (gen_rtx_SET (copy_rtx (temp),
10128 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
10129 emit_insn (gen_rtx_SET (dest,
10130 gen_rtx_IOR (SImode, copy_rtx (temp),
10131 GEN_INT (c & 0xffff))));
10132 break;
10134 case E_DImode:
10135 if (!TARGET_POWERPC64)
10137 rtx hi, lo;
10139 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
10140 DImode);
10141 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
10142 DImode);
10143 emit_move_insn (hi, GEN_INT (c >> 32));
10144 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
10145 emit_move_insn (lo, GEN_INT (c));
10147 else
10148 rs6000_emit_set_long_const (dest, c);
10149 break;
10151 default:
10152 gcc_unreachable ();
10155 insn = get_last_insn ();
10156 set = single_set (insn);
10157 if (! CONSTANT_P (SET_SRC (set)))
10158 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
10160 return true;
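/* For instance (a sketch, not from the original source), the SImode
   case above turns c = 0x12345678 into the classic two-insn sequence

       lis  3,0x1234        ; temp = c & ~0xffff
       ori  3,3,0x5678      ; dest = temp | (c & 0xffff)

   ori zero-extends its immediate, so the low half needs no sign
   adjustment. */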
10163 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
10164 Output insns to set DEST equal to the constant C as a series of
10165 lis, ori and shl instructions. */
10167 static void
10168 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
10170 rtx temp;
10171 HOST_WIDE_INT ud1, ud2, ud3, ud4;
10173 ud1 = c & 0xffff;
10174 c = c >> 16;
10175 ud2 = c & 0xffff;
10176 c = c >> 16;
10177 ud3 = c & 0xffff;
10178 c = c >> 16;
10179 ud4 = c & 0xffff;
10181 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
10182 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
10183 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
10185 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
10186 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
10188 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10190 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10191 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10192 if (ud1 != 0)
10193 emit_move_insn (dest,
10194 gen_rtx_IOR (DImode, copy_rtx (temp),
10195 GEN_INT (ud1)));
10197 else if (ud3 == 0 && ud4 == 0)
10199 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10201 gcc_assert (ud2 & 0x8000);
10202 emit_move_insn (copy_rtx (temp),
10203 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10204 if (ud1 != 0)
10205 emit_move_insn (copy_rtx (temp),
10206 gen_rtx_IOR (DImode, copy_rtx (temp),
10207 GEN_INT (ud1)));
10208 emit_move_insn (dest,
10209 gen_rtx_ZERO_EXTEND (DImode,
10210 gen_lowpart (SImode,
10211 copy_rtx (temp))));
10213 else if ((ud4 == 0xffff && (ud3 & 0x8000))
10214 || (ud4 == 0 && ! (ud3 & 0x8000)))
10216 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10218 emit_move_insn (copy_rtx (temp),
10219 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
10220 if (ud2 != 0)
10221 emit_move_insn (copy_rtx (temp),
10222 gen_rtx_IOR (DImode, copy_rtx (temp),
10223 GEN_INT (ud2)));
10224 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10225 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10226 GEN_INT (16)));
10227 if (ud1 != 0)
10228 emit_move_insn (dest,
10229 gen_rtx_IOR (DImode, copy_rtx (temp),
10230 GEN_INT (ud1)));
10232 else
10234 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10236 emit_move_insn (copy_rtx (temp),
10237 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
10238 if (ud3 != 0)
10239 emit_move_insn (copy_rtx (temp),
10240 gen_rtx_IOR (DImode, copy_rtx (temp),
10241 GEN_INT (ud3)));
10243 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
10244 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10245 GEN_INT (32)));
10246 if (ud2 != 0)
10247 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10248 gen_rtx_IOR (DImode, copy_rtx (temp),
10249 GEN_INT (ud2 << 16)));
10250 if (ud1 != 0)
10251 emit_move_insn (dest,
10252 gen_rtx_IOR (DImode, copy_rtx (temp),
10253 GEN_INT (ud1)));
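/* The final branch above is the usual five-insn PowerPC64 recipe
   (illustrative): for c = 0x123456789abcdef0, ud4..ud1 are 0x1234,
   0x5678, 0x9abc and 0xdef0, giving

       lis   3,0x1234       ; ud4 << 16
       ori   3,3,0x5678     ; | ud3
       sldi  3,3,32         ; shift into the high half
       oris  3,3,0x9abc     ; | ud2 << 16
       ori   3,3,0xdef0     ; | ud1  */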
10257 /* Helper for rs6000_emit_move below. Get rid of [r+r] memory refs
10258 in modes where they won't work (TImode, TFmode, TDmode, PTImode). */
10260 static void
10261 rs6000_eliminate_indexed_memrefs (rtx operands[2])
10263 if (GET_CODE (operands[0]) == MEM
10264 && GET_CODE (XEXP (operands[0], 0)) != REG
10265 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
10266 GET_MODE (operands[0]), false))
10267 operands[0]
10268 = replace_equiv_address (operands[0],
10269 copy_addr_to_reg (XEXP (operands[0], 0)));
10271 if (GET_CODE (operands[1]) == MEM
10272 && GET_CODE (XEXP (operands[1], 0)) != REG
10273 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
10274 GET_MODE (operands[1]), false))
10275 operands[1]
10276 = replace_equiv_address (operands[1],
10277 copy_addr_to_reg (XEXP (operands[1], 0)));
10280 /* Generate a vector of constants to permute MODE for a little-endian
10281 storage operation by swapping the two halves of a vector. */
10282 static rtvec
10283 rs6000_const_vec (machine_mode mode)
10285 int i, subparts;
10286 rtvec v;
10288 switch (mode)
10290 case E_V1TImode:
10291 subparts = 1;
10292 break;
10293 case E_V2DFmode:
10294 case E_V2DImode:
10295 subparts = 2;
10296 break;
10297 case E_V4SFmode:
10298 case E_V4SImode:
10299 subparts = 4;
10300 break;
10301 case E_V8HImode:
10302 subparts = 8;
10303 break;
10304 case E_V16QImode:
10305 subparts = 16;
10306 break;
10307 default:
10308 gcc_unreachable();
10311 v = rtvec_alloc (subparts);
10313 for (i = 0; i < subparts / 2; ++i)
10314 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10315 for (i = subparts / 2; i < subparts; ++i)
10316 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
10318 return v;
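/* For example, V4SImode yields the permutation { 2, 3, 0, 1 } and
   V16QImode yields { 8 .. 15, 0 .. 7 }: each element selects from the
   other half of the vector, i.e. the two halves are swapped. */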
10321 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
10322 store operation. */
10323 void
10324 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
10326 /* Scalar permutations are easier to express in integer modes than in
10327 floating-point modes, so cast them here. We use V1TImode instead
10328 of TImode to ensure that the values don't go through GPRs. */
10329 if (FLOAT128_VECTOR_P (mode))
10331 dest = gen_lowpart (V1TImode, dest);
10332 source = gen_lowpart (V1TImode, source);
10333 mode = V1TImode;
10336 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
10337 scalar. */
10338 if (mode == TImode || mode == V1TImode)
10339 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
10340 GEN_INT (64))));
10341 else
10343 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10344 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
10348 /* Emit a little-endian load from vector memory location SOURCE to VSX
10349 register DEST in mode MODE. The load is done with two permuting
10350 insns that represent an lxvd2x and an xxpermdi.
10351 void
10352 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10354 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10355 V1TImode). */
10356 if (mode == TImode || mode == V1TImode)
10358 mode = V2DImode;
10359 dest = gen_lowpart (V2DImode, dest);
10360 source = adjust_address (source, V2DImode, 0);
10363 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10364 rs6000_emit_le_vsx_permute (tmp, source, mode);
10365 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10368 /* Emit a little-endian store to vector memory location DEST from VSX
10369 register SOURCE in mode MODE. The store is done with two permuting
10370 insns that represent an xxpermdi and an stxvd2x.
10371 void
10372 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10374 /* This should never be called during or after LRA, because it does
10375 not re-permute the source register. It is intended only for use
10376 during expand. */
10377 gcc_assert (!lra_in_progress && !reload_completed);
10379 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10380 V1TImode). */
10381 if (mode == TImode || mode == V1TImode)
10383 mode = V2DImode;
10384 dest = adjust_address (dest, V2DImode, 0);
10385 source = gen_lowpart (V2DImode, source);
10388 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10389 rs6000_emit_le_vsx_permute (tmp, source, mode);
10390 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10393 /* Emit a sequence representing a little-endian VSX load or store,
10394 moving data from SOURCE to DEST in mode MODE. This is done
10395 separately from rs6000_emit_move to ensure it is called only
10396 during expand. LE VSX loads and stores introduced later are
10397 handled with a split. The expand-time RTL generation allows
10398 us to optimize away redundant pairs of register-permutes. */
10399 void
10400 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10402 gcc_assert (!BYTES_BIG_ENDIAN
10403 && VECTOR_MEM_VSX_P (mode)
10404 && !TARGET_P9_VECTOR
10405 && !gpr_or_gpr_p (dest, source)
10406 && (MEM_P (source) ^ MEM_P (dest)));
10408 if (MEM_P (source))
10410 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10411 rs6000_emit_le_vsx_load (dest, source, mode);
10413 else
10415 if (!REG_P (source))
10416 source = force_reg (mode, source);
10417 rs6000_emit_le_vsx_store (dest, source, mode);
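/* A sketch of why this helps (illustrative assembly): on little-endian
   the pair emitted for a load is

       lxvd2x   0,0,9         ; doublewords arrive swapped
       xxpermdi 0,0,0,2       ; swap them back into element order

   and a store is the mirror image. When a value is merely loaded and
   stored again, the two inner xxpermdi insns cancel, and generating
   them at expand time lets later passes delete such redundant pairs. */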
10421 /* Return whether an SFmode or SImode move can be done without converting one
10422 mode to another. This arises when we have:
10424 (SUBREG:SF (REG:SI ...))
10425 (SUBREG:SI (REG:SF ...))
10427 and one of the values is in a floating point/vector register, where SFmode
10428 scalars are stored in DFmode format. */
10430 bool
10431 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10433 if (TARGET_ALLOW_SF_SUBREG)
10434 return true;
10436 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10437 return true;
10439 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10440 return true;
10442 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
10443 if (SUBREG_P (dest))
10445 rtx dest_subreg = SUBREG_REG (dest);
10446 rtx src_subreg = SUBREG_REG (src);
10447 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10450 return false;
10454 /* Helper function to change moves with:
10456 (SUBREG:SF (REG:SI)) and
10457 (SUBREG:SI (REG:SF))
10459 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10460 values are stored as DFmode values in the VSX registers. We need to convert
10461 the bits before we can use a direct move or operate on the bits in the
10462 vector register as an integer type.
10464 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
10466 static bool
10467 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10469 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
10470 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10471 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10473 rtx inner_source = SUBREG_REG (source);
10474 machine_mode inner_mode = GET_MODE (inner_source);
10476 if (mode == SImode && inner_mode == SFmode)
10478 emit_insn (gen_movsi_from_sf (dest, inner_source));
10479 return true;
10482 if (mode == SFmode && inner_mode == SImode)
10484 emit_insn (gen_movsf_from_si (dest, inner_source));
10485 return true;
10489 return false;
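/* For example (hypothetical register numbers),

     (set (reg:SI 3) (subreg:SI (reg:SF 32) 0))

   cannot be a plain direct move: the SFmode value sits in the VSX
   register in DFmode format, so movsi_from_sf first repacks the bits
   (roughly an xscvdpspn) before moving them to the GPR. */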
10492 /* Emit a move from SOURCE to DEST in mode MODE. */
10493 void
10494 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10496 rtx operands[2];
10497 operands[0] = dest;
10498 operands[1] = source;
10500 if (TARGET_DEBUG_ADDR)
10502 fprintf (stderr,
10503 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
10504 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10505 GET_MODE_NAME (mode),
10506 lra_in_progress,
10507 reload_completed,
10508 can_create_pseudo_p ());
10509 debug_rtx (dest);
10510 fprintf (stderr, "source:\n");
10511 debug_rtx (source);
10514 /* Sanity checks. Check that we get CONST_WIDE_INT only when we should. */
10515 if (CONST_WIDE_INT_P (operands[1])
10516 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10518 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10519 gcc_unreachable ();
10522 /* See if we need to special case SImode/SFmode SUBREG moves. */
10523 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10524 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10525 return;
10527 /* Check if GCC is setting up a block move that will end up using FP
10528 registers as temporaries. We must make sure this is acceptable. */
10529 if (GET_CODE (operands[0]) == MEM
10530 && GET_CODE (operands[1]) == MEM
10531 && mode == DImode
10532 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
10533 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
10534 && ! (rs6000_slow_unaligned_access (SImode,
10535 (MEM_ALIGN (operands[0]) > 32
10536 ? 32 : MEM_ALIGN (operands[0])))
10537 || rs6000_slow_unaligned_access (SImode,
10538 (MEM_ALIGN (operands[1]) > 32
10539 ? 32 : MEM_ALIGN (operands[1]))))
10540 && ! MEM_VOLATILE_P (operands [0])
10541 && ! MEM_VOLATILE_P (operands [1]))
10543 emit_move_insn (adjust_address (operands[0], SImode, 0),
10544 adjust_address (operands[1], SImode, 0));
10545 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10546 adjust_address (copy_rtx (operands[1]), SImode, 4));
10547 return;
10550 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10551 && !gpc_reg_operand (operands[1], mode))
10552 operands[1] = force_reg (mode, operands[1]);
10554 /* Recognize the case where operand[1] is a reference to thread-local
10555 data and load its address to a register. */
10556 if (tls_referenced_p (operands[1]))
10558 enum tls_model model;
10559 rtx tmp = operands[1];
10560 rtx addend = NULL;
10562 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10564 addend = XEXP (XEXP (tmp, 0), 1);
10565 tmp = XEXP (XEXP (tmp, 0), 0);
10568 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10569 model = SYMBOL_REF_TLS_MODEL (tmp);
10570 gcc_assert (model != 0);
10572 tmp = rs6000_legitimize_tls_address (tmp, model);
10573 if (addend)
10575 tmp = gen_rtx_PLUS (mode, tmp, addend);
10576 tmp = force_operand (tmp, operands[0]);
10578 operands[1] = tmp;
10581 /* 128-bit constant floating-point values on Darwin should really be loaded
10582 as two parts. However, this premature splitting is a problem when DFmode
10583 values can go into Altivec registers. */
10584 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10585 && GET_CODE (operands[1]) == CONST_DOUBLE)
10587 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10588 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10589 DFmode);
10590 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10591 GET_MODE_SIZE (DFmode)),
10592 simplify_gen_subreg (DFmode, operands[1], mode,
10593 GET_MODE_SIZE (DFmode)),
10594 DFmode);
10595 return;
10598 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10599 p1:SD) if p1 is not of floating point class and p0 is spilled, as
10600 we have no analogous movsd_store for this.
10601 if (lra_in_progress && mode == DDmode
10602 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10603 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10604 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10605 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10607 enum reg_class cl;
10608 int regno = REGNO (SUBREG_REG (operands[1]));
10610 if (regno >= FIRST_PSEUDO_REGISTER)
10612 cl = reg_preferred_class (regno);
10613 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10615 if (regno >= 0 && ! FP_REGNO_P (regno))
10617 mode = SDmode;
10618 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10619 operands[1] = SUBREG_REG (operands[1]);
10622 if (lra_in_progress
10623 && mode == SDmode
10624 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10625 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10626 && (REG_P (operands[1])
10627 || (GET_CODE (operands[1]) == SUBREG
10628 && REG_P (SUBREG_REG (operands[1])))))
10630 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10631 ? SUBREG_REG (operands[1]) : operands[1]);
10632 enum reg_class cl;
10634 if (regno >= FIRST_PSEUDO_REGISTER)
10636 cl = reg_preferred_class (regno);
10637 gcc_assert (cl != NO_REGS);
10638 regno = ira_class_hard_regs[cl][0];
10640 if (FP_REGNO_P (regno))
10642 if (GET_MODE (operands[0]) != DDmode)
10643 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10644 emit_insn (gen_movsd_store (operands[0], operands[1]));
10646 else if (INT_REGNO_P (regno))
10647 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10648 else
10649 gcc_unreachable();
10650 return;
10652 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10653 p1:DD)) if p0 is not of floating point class and p1 is spilled, as
10654 we have no analogous movsd_load for this. */
10655 if (lra_in_progress && mode == DDmode
10656 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10657 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10658 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10659 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10661 enum reg_class cl;
10662 int regno = REGNO (SUBREG_REG (operands[0]));
10664 if (regno >= FIRST_PSEUDO_REGISTER)
10666 cl = reg_preferred_class (regno);
10667 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10669 if (regno >= 0 && ! FP_REGNO_P (regno))
10671 mode = SDmode;
10672 operands[0] = SUBREG_REG (operands[0]);
10673 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10676 if (lra_in_progress
10677 && mode == SDmode
10678 && (REG_P (operands[0])
10679 || (GET_CODE (operands[0]) == SUBREG
10680 && REG_P (SUBREG_REG (operands[0]))))
10681 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10682 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10684 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10685 ? SUBREG_REG (operands[0]) : operands[0]);
10686 enum reg_class cl;
10688 if (regno >= FIRST_PSEUDO_REGISTER)
10690 cl = reg_preferred_class (regno);
10691 gcc_assert (cl != NO_REGS);
10692 regno = ira_class_hard_regs[cl][0];
10694 if (FP_REGNO_P (regno))
10696 if (GET_MODE (operands[1]) != DDmode)
10697 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10698 emit_insn (gen_movsd_load (operands[0], operands[1]));
10700 else if (INT_REGNO_P (regno))
10701 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10702 else
10703 gcc_unreachable();
10704 return;
10707 /* FIXME: In the long term, this switch statement should go away
10708 and be replaced by a sequence of tests based on things like
10709 mode == Pmode. */
10710 switch (mode)
10712 case E_HImode:
10713 case E_QImode:
10714 if (CONSTANT_P (operands[1])
10715 && GET_CODE (operands[1]) != CONST_INT)
10716 operands[1] = force_const_mem (mode, operands[1]);
10717 break;
10719 case E_TFmode:
10720 case E_TDmode:
10721 case E_IFmode:
10722 case E_KFmode:
10723 if (FLOAT128_2REG_P (mode))
10724 rs6000_eliminate_indexed_memrefs (operands);
10725 /* fall through */
10727 case E_DFmode:
10728 case E_DDmode:
10729 case E_SFmode:
10730 case E_SDmode:
10731 if (CONSTANT_P (operands[1])
10732 && ! easy_fp_constant (operands[1], mode))
10733 operands[1] = force_const_mem (mode, operands[1]);
10734 break;
10736 case E_V16QImode:
10737 case E_V8HImode:
10738 case E_V4SFmode:
10739 case E_V4SImode:
10740 case E_V2SFmode:
10741 case E_V2SImode:
10742 case E_V2DFmode:
10743 case E_V2DImode:
10744 case E_V1TImode:
10745 if (CONSTANT_P (operands[1])
10746 && !easy_vector_constant (operands[1], mode))
10747 operands[1] = force_const_mem (mode, operands[1]);
10748 break;
10750 case E_SImode:
10751 case E_DImode:
10752 /* Use the default pattern for the address of ELF small data. */
10753 if (TARGET_ELF
10754 && mode == Pmode
10755 && DEFAULT_ABI == ABI_V4
10756 && (GET_CODE (operands[1]) == SYMBOL_REF
10757 || GET_CODE (operands[1]) == CONST)
10758 && small_data_operand (operands[1], mode))
10760 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10761 return;
10764 if (DEFAULT_ABI == ABI_V4
10765 && mode == Pmode && mode == SImode
10766 && flag_pic == 1 && got_operand (operands[1], mode))
10768 emit_insn (gen_movsi_got (operands[0], operands[1]));
10769 return;
10772 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10773 && TARGET_NO_TOC
10774 && ! flag_pic
10775 && mode == Pmode
10776 && CONSTANT_P (operands[1])
10777 && GET_CODE (operands[1]) != HIGH
10778 && GET_CODE (operands[1]) != CONST_INT)
10780 rtx target = (!can_create_pseudo_p ()
10781 ? operands[0]
10782 : gen_reg_rtx (mode));
10784 /* If this is a function address on -mcall-aixdesc,
10785 convert it to the address of the descriptor. */
10786 if (DEFAULT_ABI == ABI_AIX
10787 && GET_CODE (operands[1]) == SYMBOL_REF
10788 && XSTR (operands[1], 0)[0] == '.')
10790 const char *name = XSTR (operands[1], 0);
10791 rtx new_ref;
10792 while (*name == '.')
10793 name++;
10794 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10795 CONSTANT_POOL_ADDRESS_P (new_ref)
10796 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10797 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10798 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10799 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10800 operands[1] = new_ref;
10803 if (DEFAULT_ABI == ABI_DARWIN)
10805 #if TARGET_MACHO
10806 if (MACHO_DYNAMIC_NO_PIC_P)
10808 /* Take care of any required data indirection. */
10809 operands[1] = rs6000_machopic_legitimize_pic_address (
10810 operands[1], mode, operands[0]);
10811 if (operands[0] != operands[1])
10812 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10813 return;
10815 #endif
10816 emit_insn (gen_macho_high (target, operands[1]));
10817 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10818 return;
10821 emit_insn (gen_elf_high (target, operands[1]));
10822 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10823 return;
10826 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10827 and we have put it in the TOC, we just need to make a TOC-relative
10828 reference to it. */
10829 if (TARGET_TOC
10830 && GET_CODE (operands[1]) == SYMBOL_REF
10831 && use_toc_relative_ref (operands[1], mode))
10832 operands[1] = create_TOC_reference (operands[1], operands[0]);
10833 else if (mode == Pmode
10834 && CONSTANT_P (operands[1])
10835 && GET_CODE (operands[1]) != HIGH
10836 && ((GET_CODE (operands[1]) != CONST_INT
10837 && ! easy_fp_constant (operands[1], mode))
10838 || (GET_CODE (operands[1]) == CONST_INT
10839 && (num_insns_constant (operands[1], mode)
10840 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10841 || (GET_CODE (operands[0]) == REG
10842 && FP_REGNO_P (REGNO (operands[0]))))
10843 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10844 && (TARGET_CMODEL == CMODEL_SMALL
10845 || can_create_pseudo_p ()
10846 || (REG_P (operands[0])
10847 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10850 #if TARGET_MACHO
10851 /* Darwin uses a special PIC legitimizer. */
10852 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10854 operands[1] =
10855 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10856 operands[0]);
10857 if (operands[0] != operands[1])
10858 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10859 return;
10861 #endif
10863 /* If we are to limit the number of things we put in the TOC and
10864 this is a symbol plus a constant we can add in one insn,
10865 just put the symbol in the TOC and add the constant. */
10866 if (GET_CODE (operands[1]) == CONST
10867 && TARGET_NO_SUM_IN_TOC
10868 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10869 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10870 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10871 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10872 && ! side_effects_p (operands[0]))
10874 rtx sym =
10875 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10876 rtx other = XEXP (XEXP (operands[1], 0), 1);
10878 sym = force_reg (mode, sym);
10879 emit_insn (gen_add3_insn (operands[0], sym, other));
10880 return;
10883 operands[1] = force_const_mem (mode, operands[1]);
10885 if (TARGET_TOC
10886 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10887 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10889 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10890 operands[0]);
10891 operands[1] = gen_const_mem (mode, tocref);
10892 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10895 break;
10897 case E_TImode:
10898 if (!VECTOR_MEM_VSX_P (TImode))
10899 rs6000_eliminate_indexed_memrefs (operands);
10900 break;
10902 case E_PTImode:
10903 rs6000_eliminate_indexed_memrefs (operands);
10904 break;
10906 default:
10907 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10910 /* Above, we may have called force_const_mem which may have returned
10911 an invalid address. If we can, fix this up; otherwise, reload will
10912 have to deal with it. */
10913 if (GET_CODE (operands[1]) == MEM)
10914 operands[1] = validize_mem (operands[1]);
10916 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10919 /* Nonzero if we can use a floating-point register to pass this arg. */
10920 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10921 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10922 && (CUM)->fregno <= FP_ARG_MAX_REG \
10923 && TARGET_HARD_FLOAT)
10925 /* Nonzero if we can use an AltiVec register to pass this arg. */
10926 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10927 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10928 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10929 && TARGET_ALTIVEC_ABI \
10930 && (NAMED))
10932 /* Walk down the type tree of TYPE counting consecutive base elements.
10933 If *MODEP is VOIDmode, then set it to the first valid floating point
10934 or vector type. If a non-floating point or vector type is found, or
10935 if a floating point or vector type that doesn't match a non-VOIDmode
10936 *MODEP is found, then return -1, otherwise return the count in the
10937 sub-tree. */
static int
rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
{
  machine_mode mode;
  HOST_WIDE_INT size;

  switch (TREE_CODE (type))
    {
    case REAL_TYPE:
      mode = TYPE_MODE (type);
      if (!SCALAR_FLOAT_MODE_P (mode))
	return -1;

      if (*modep == VOIDmode)
	*modep = mode;

      if (*modep == mode)
	return 1;

      break;

    case COMPLEX_TYPE:
      mode = TYPE_MODE (TREE_TYPE (type));
      if (!SCALAR_FLOAT_MODE_P (mode))
	return -1;

      if (*modep == VOIDmode)
	*modep = mode;

      if (*modep == mode)
	return 2;

      break;

    case VECTOR_TYPE:
      if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
	return -1;

      /* Use V4SImode as representative of all 128-bit vector types.  */
      size = int_size_in_bytes (type);
      switch (size)
	{
	case 16:
	  mode = V4SImode;
	  break;
	default:
	  return -1;
	}

      if (*modep == VOIDmode)
	*modep = mode;

      /* Vector modes are considered to be opaque: two vectors are
	 equivalent for the purposes of being homogeneous aggregates
	 if they are the same size.  */
      if (*modep == mode)
	return 1;

      break;

    case ARRAY_TYPE:
      {
	int count;
	tree index = TYPE_DOMAIN (type);

	/* Can't handle incomplete types nor sizes that are not
	   fixed.  */
	if (!COMPLETE_TYPE_P (type)
	    || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
	  return -1;

	count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
	if (count == -1
	    || !index
	    || !TYPE_MAX_VALUE (index)
	    || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
	    || !TYPE_MIN_VALUE (index)
	    || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
	    || count < 0)
	  return -1;

	count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
		  - tree_to_uhwi (TYPE_MIN_VALUE (index)));

	/* There must be no padding.  */
	if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
	  return -1;

	return count;
      }

    case RECORD_TYPE:
      {
	int count = 0;
	int sub_count;
	tree field;

	/* Can't handle incomplete types nor sizes that are not
	   fixed.  */
	if (!COMPLETE_TYPE_P (type)
	    || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
	  return -1;

	for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
	  {
	    if (TREE_CODE (field) != FIELD_DECL)
	      continue;

	    sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
	    if (sub_count < 0)
	      return -1;
	    count += sub_count;
	  }

	/* There must be no padding.  */
	if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
	  return -1;

	return count;
      }

    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
	/* These aren't very interesting except in a degenerate case.  */
	int count = 0;
	int sub_count;
	tree field;

	/* Can't handle incomplete types nor sizes that are not
	   fixed.  */
	if (!COMPLETE_TYPE_P (type)
	    || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
	  return -1;

	for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
	  {
	    if (TREE_CODE (field) != FIELD_DECL)
	      continue;

	    sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
	    if (sub_count < 0)
	      return -1;
	    count = count > sub_count ? count : sub_count;
	  }

	/* There must be no padding.  */
	if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}

/* If an argument, whose type is described by TYPE and MODE, is a homogeneous
   float or vector aggregate that shall be passed in FP/vector registers
   according to the ELFv2 ABI, return the homogeneous element mode in
   *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.

   Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE.  */
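
/* Editor-added illustration: under the ELFv2 ABI,
   struct { double a, b, c; } yields *ELT_MODE == DFmode and *N_ELTS == 3,
   so the value travels in three consecutive FPRs, while a struct of two
   16-byte AltiVec vectors yields *ELT_MODE == V4SImode and *N_ELTS == 2.
   AGGR_ARG_NUM_REG caps how many registers such an aggregate may use.  */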
static bool
rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
				       machine_mode *elt_mode,
				       int *n_elts)
{
  /* Note that we do not accept complex types at the top level as
     homogeneous aggregates; these types are handled via the
     targetm.calls.split_complex_arg mechanism.  Complex types
     can be elements of homogeneous aggregates, however.  */
  if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
    {
      machine_mode field_mode = VOIDmode;
      int field_count = rs6000_aggregate_candidate (type, &field_mode);

      if (field_count > 0)
	{
	  int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
			(GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);

	  /* The ELFv2 ABI allows homogeneous aggregates to occupy
	     up to AGGR_ARG_NUM_REG registers.  */
	  if (field_count * n_regs <= AGGR_ARG_NUM_REG)
	    {
	      if (elt_mode)
		*elt_mode = field_mode;
	      if (n_elts)
		*n_elts = field_count;
	      return true;
	    }
	}
    }

  if (elt_mode)
    *elt_mode = mode;
  if (n_elts)
    *n_elts = 1;
  return false;
}

/* Return a nonzero value to say to return the function value in
   memory, just as large structures are always returned.  TYPE will be
   the data type of the value, and FNTYPE will be the type of the
   function doing the returning, or @code{NULL} for libcalls.

   The AIX ABI for the RS/6000 specifies that all structures are
   returned in memory.  The Darwin ABI does the same.

   For the Darwin 64 Bit ABI, a function result can be returned in
   registers or in memory, depending on the size of the return data
   type.  If it is returned in registers, the value occupies the same
   registers as it would if it were the first and only function
   argument.  Otherwise, the function places its result in memory at
   the location pointed to by GPR3.

   The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
   but a draft put them in memory, and GCC used to implement the draft
   instead of the final standard.  Therefore, aix_struct_return
   controls this instead of DEFAULT_ABI; V.4 targets needing backward
   compatibility can change DRAFT_V4_STRUCT_RET to override the
   default, and -m switches get the final word.  See
   rs6000_option_override_internal for more details.

   The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
   long double support is enabled.  These values are returned in memory.

   int_size_in_bytes returns -1 for variable size objects, which go in
   memory always.  The cast to unsigned makes -1 > 8.  */
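
/* Editor-added illustration: under AIX rules (aix_struct_return) even
   struct { int x; } is returned in memory through a hidden pointer,
   whereas under ELFv2 struct { double a, b; } comes back in f1/f2 and a
   plain 8-byte struct comes back in r3.  */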
static bool
rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* For the Darwin64 ABI, test if we can fit the return value in regs.  */
  if (TARGET_MACHO
      && rs6000_darwin64_abi
      && TREE_CODE (type) == RECORD_TYPE
      && int_size_in_bytes (type) > 0)
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed
	 as an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
      if (valret)
	return false;
      /* Otherwise fall through to more conventional ABI rules.  */
    }

  /* The ELFv2 ABI returns homogeneous float/vector aggregates in
     registers.  */
  if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
					     NULL, NULL))
    return false;

  /* The ELFv2 ABI returns aggregates up to 16 bytes in registers.  */
  if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
      && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
    return false;

  if (AGGREGATE_TYPE_P (type)
      && (aix_struct_return
	  || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
    return true;

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    return false;

  /* Return synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_return_big_vectors = false;
      if (!warned_for_return_big_vectors)
	{
	  warning (OPT_Wpsabi, "GCC vector returned by reference: "
		   "non-standard ABI extension with no compatibility "
		   "guarantee");
	  warned_for_return_big_vectors = true;
	}
      return true;
    }

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
      && FLOAT128_IEEE_P (TYPE_MODE (type)))
    return true;

  return false;
}
/* Specify whether values returned in registers should be at the most
   significant end of a register.  We want aggregates returned by
   value to match the way aggregates are passed to functions.  */

static bool
rs6000_return_in_msb (const_tree valtype)
{
  return (DEFAULT_ABI == ABI_ELFv2
	  && BYTES_BIG_ENDIAN
	  && AGGREGATE_TYPE_P (valtype)
	  && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
	      == PAD_UPWARD));
}

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Return TRUE if a call to function FNDECL may be one that
   potentially affects the function calling ABI of the object file.  */

static bool
call_ABI_of_interest (tree fndecl)
{
  if (rs6000_gnu_attr && symtab->state == EXPANSION)
    {
      struct cgraph_node *c_node;

      /* Libcalls are always interesting.  */
      if (fndecl == NULL_TREE)
	return true;

      /* Any call to an external function is interesting.  */
      if (DECL_EXTERNAL (fndecl))
	return true;

      /* Interesting functions that we are emitting in this object file.  */
      c_node = cgraph_node::get (fndecl);
      c_node = c_node->ultimate_alias_target ();
      return !c_node->only_called_directly_p ();
    }
  return false;
}
#endif

/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.

   For incoming args we set the number of arguments in the prototype large
   so we never return a PARALLEL.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
		      rtx libname ATTRIBUTE_UNUSED, int incoming,
		      int libcall, int n_named_args,
		      tree fndecl ATTRIBUTE_UNUSED,
		      machine_mode return_mode ATTRIBUTE_UNUSED)
{
  static CUMULATIVE_ARGS zero_cumulative;

  *cum = zero_cumulative;
  cum->words = 0;
  cum->fregno = FP_ARG_MIN_REG;
  cum->vregno = ALTIVEC_ARG_MIN_REG;
  cum->prototype = (fntype && prototype_p (fntype));
  cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
		      ? CALL_LIBCALL : CALL_NORMAL);
  cum->sysv_gregno = GP_ARG_MIN_REG;
  cum->stdarg = stdarg_p (fntype);
  cum->libcall = libcall;

  cum->nargs_prototype = 0;
  if (incoming || cum->prototype)
    cum->nargs_prototype = n_named_args;

  /* Check for a longcall attribute.  */
  if ((!fntype && rs6000_default_long_calls)
      || (fntype
	  && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
	  && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
    cum->call_cookie |= CALL_LONG;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr, "\ninit_cumulative_args:");
      if (fntype)
	{
	  tree ret_type = TREE_TYPE (fntype);
	  fprintf (stderr, " ret code = %s,",
		   get_tree_code_name (TREE_CODE (ret_type)));
	}

      if (cum->call_cookie & CALL_LONG)
	fprintf (stderr, " longcall,");

      fprintf (stderr, " proto = %d, nargs = %d\n",
	       cum->prototype, cum->nargs_prototype);
    }

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
    {
      cum->escapes = call_ABI_of_interest (fndecl);
      if (cum->escapes)
	{
	  tree return_type;

	  if (fntype)
	    {
	      return_type = TREE_TYPE (fntype);
	      return_mode = TYPE_MODE (return_type);
	    }
	  else
	    return_type = lang_hooks.types.type_for_mode (return_mode, 0);

	  if (return_type != NULL)
	    {
	      if (TREE_CODE (return_type) == RECORD_TYPE
		  && TYPE_TRANSPARENT_AGGR (return_type))
		{
		  return_type = TREE_TYPE (first_field (return_type));
		  return_mode = TYPE_MODE (return_type);
		}
	      if (AGGREGATE_TYPE_P (return_type)
		  && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
		      <= 8))
		rs6000_returns_struct = true;
	    }
	  if (SCALAR_FLOAT_MODE_P (return_mode))
	    {
	      rs6000_passes_float = true;
	      if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
		  && (FLOAT128_IBM_P (return_mode)
		      || FLOAT128_IEEE_P (return_mode)
		      || (return_type != NULL
			  && (TYPE_MAIN_VARIANT (return_type)
			      == long_double_type_node))))
		rs6000_passes_long_double = true;
	    }
	  if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
	      || PAIRED_VECTOR_MODE (return_mode))
	    rs6000_passes_vector = true;
	}
    }
#endif

  if (fntype
      && !TARGET_ALTIVEC
      && TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
    {
      error ("cannot return value in vector register because"
	     " altivec instructions are disabled, use %qs"
	     " to enable them", "-maltivec");
    }
}
/* The mode the ABI uses for a word.  This is not the same as word_mode
   for -m32 -mpowerpc64.  This is used to implement various target hooks.  */

static scalar_int_mode
rs6000_abi_word_mode (void)
{
  return TARGET_32BIT ? SImode : DImode;
}

/* Implement the TARGET_OFFLOAD_OPTIONS hook.  */
static char *
rs6000_offload_options (void)
{
  if (TARGET_64BIT)
    return xstrdup ("-foffload-abi=lp64");
  else
    return xstrdup ("-foffload-abi=ilp32");
}

/* On rs6000, function arguments are promoted, as are function return
   values.  */

static machine_mode
rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			      machine_mode mode,
			      int *punsignedp ATTRIBUTE_UNUSED,
			      const_tree, int)
{
  PROMOTE_MODE (mode, *punsignedp, type);

  return mode;
}

/* Return true if TYPE must be passed on the stack and not in registers.  */

static bool
rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}

static inline bool
is_complex_IBM_long_double (machine_mode mode)
{
  return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
}

/* Whether ABI_V4 passes MODE args to a function in floating point
   registers.  */

static bool
abi_v4_pass_in_fpr (machine_mode mode)
{
  if (!TARGET_HARD_FLOAT)
    return false;
  if (TARGET_SINGLE_FLOAT && mode == SFmode)
    return true;
  if (TARGET_DOUBLE_FLOAT && mode == DFmode)
    return true;
  /* ABI_V4 passes complex IBM long double in 8 gprs.
     Stupid, but we can't change the ABI now.  */
  if (is_complex_IBM_long_double (mode))
    return false;
  if (FLOAT128_2REG_P (mode))
    return true;
  if (DECIMAL_FLOAT_MODE_P (mode))
    return true;
  return false;
}

/* Implement TARGET_FUNCTION_ARG_PADDING.

   For the AIX ABI structs are always stored left shifted in their
   argument slot.  */

static pad_direction
rs6000_function_arg_padding (machine_mode mode, const_tree type)
{
#ifndef AGGREGATE_PADDING_FIXED
#define AGGREGATE_PADDING_FIXED 0
#endif
#ifndef AGGREGATES_PAD_UPWARD_ALWAYS
#define AGGREGATES_PAD_UPWARD_ALWAYS 0
#endif

  if (!AGGREGATE_PADDING_FIXED)
    {
      /* GCC used to pass structures of the same size as integer types as
	 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
	 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
	 passed padded downward, except that -mstrict-align further
	 muddied the water in that multi-component structures of 2 and 4
	 bytes in size were passed padded upward.

	 The following arranges for best compatibility with previous
	 versions of gcc, but removes the -mstrict-align dependency.  */
      if (BYTES_BIG_ENDIAN)
	{
	  HOST_WIDE_INT size = 0;

	  if (mode == BLKmode)
	    {
	      if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
		size = int_size_in_bytes (type);
	    }
	  else
	    size = GET_MODE_SIZE (mode);

	  if (size == 1 || size == 2 || size == 4)
	    return PAD_DOWNWARD;
	}
      return PAD_UPWARD;
    }

  if (AGGREGATES_PAD_UPWARD_ALWAYS)
    {
      if (type != 0 && AGGREGATE_TYPE_P (type))
	return PAD_UPWARD;
    }

  /* Fall back to the default.  */
  return default_function_arg_padding (mode, type);
}

/* If defined, a C expression that gives the alignment boundary, in bits,
   of an argument with the specified mode and type.  If it is not defined,
   PARM_BOUNDARY is used for all arguments.

   V.4 wants long longs and doubles to be double word aligned.  Just
   testing the mode size is a boneheaded way to do this as it means
   that other types such as complex int are also double word aligned.
   However, we're stuck with this because changing the ABI might break
   existing library interfaces.

   Quadword align Altivec/VSX vectors.
   Quadword align large synthetic vector types.  */
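
/* Editor-added illustration: under ABI_V4 a long long or double argument
   reports a 64-bit boundary, an AltiVec vector a 128-bit one, and a plain
   int falls through to PARM_BOUNDARY (32 bits for -m32, 64 for -m64).  */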
static unsigned int
rs6000_function_arg_boundary (machine_mode mode, const_tree type)
{
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (DEFAULT_ABI == ABI_V4
      && (GET_MODE_SIZE (mode) == 8
	  || (TARGET_HARD_FLOAT
	      && !is_complex_IBM_long_double (mode)
	      && FLOAT128_2REG_P (mode))))
    return 64;
  else if (FLOAT128_VECTOR_P (mode))
    return 128;
  else if (PAIRED_VECTOR_MODE (mode)
	   || (type && TREE_CODE (type) == VECTOR_TYPE
	       && int_size_in_bytes (type) >= 8
	       && int_size_in_bytes (type) < 16))
    return 64;
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
	   || (type && TREE_CODE (type) == VECTOR_TYPE
	       && int_size_in_bytes (type) >= 16))
    return 128;

  /* Aggregate types that need > 8 byte alignment are quadword-aligned
     in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
     -mcompat-align-parm is used.  */
  if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
       || DEFAULT_ABI == ABI_ELFv2)
      && type && TYPE_ALIGN (type) > 64)
    {
      /* "Aggregate" means any AGGREGATE_TYPE except for single-element
	 or homogeneous float/vector aggregates here.  We already handled
	 vector aggregates above, but still need to check for float here.  */
      bool aggregate_p = (AGGREGATE_TYPE_P (type)
			  && !SCALAR_FLOAT_MODE_P (elt_mode));

      /* We used to check for BLKmode instead of the above aggregate type
	 check.  Warn when this results in any difference to the ABI.  */
      if (aggregate_p != (mode == BLKmode))
	{
	  static bool warned;
	  if (!warned && warn_psabi)
	    {
	      warned = true;
	      inform (input_location,
		      "the ABI of passing aggregates with %d-byte alignment"
		      " has changed in GCC 5",
		      (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
	    }
	}

      if (aggregate_p)
	return 128;
    }

  /* Similar for the Darwin64 ABI.  Note that for historical reasons we
     implement the "aggregate type" check as a BLKmode check here; this
     means certain aggregate types are in fact not aligned.  */
  if (TARGET_MACHO && rs6000_darwin64_abi
      && mode == BLKmode
      && type && TYPE_ALIGN (type) > 64)
    return 128;

  return PARM_BOUNDARY;
}

/* The offset in words to the start of the parameter save area.  */

static unsigned int
rs6000_parm_offset (void)
{
  return (DEFAULT_ABI == ABI_V4 ? 2
	  : DEFAULT_ABI == ABI_ELFv2 ? 4
	  : 6);
}
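
/* Editor-added note: those word counts are the fixed stack-frame header
   in front of the parameter save area: 2 words (8 bytes) for V.4, 4
   doublewords (32 bytes) for ELFv2, and 6 words (24 or 48 bytes) for the
   AIX/ELFv1 ABIs.  */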
/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (machine_mode mode, const_tree type,
		   unsigned int nwords)
{
  unsigned int align;

  align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  return nwords + (-(rs6000_parm_offset () + nwords) & align);
}
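
/* Editor-added worked example: for a 16-byte-aligned argument under
   64-bit ELFv2 (boundary 128, PARM_BOUNDARY 64), ALIGN is 1.  With
   NWORDS == 1 the result is 1 + (-(4 + 1) & 1) == 2, skipping one word
   so the argument starts at doubleword 4 + 2 == 6 from the stack
   pointer, a 16-byte-aligned offset.  */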
/* Compute the size (in words) of a function argument.  */

static unsigned long
rs6000_arg_size (machine_mode mode, const_tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
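
/* Editor-added illustration: a 9-byte BLKmode struct occupies
   (9 + 7) >> 3 == 2 doublewords in 64-bit mode and (9 + 3) >> 2 == 3
   words in 32-bit mode; sizes always round up to whole parameter
   words.  */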
/* Use this to flush pending int fields.  */

static void
rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
					  HOST_WIDE_INT bitpos, int final)
{
  unsigned int startbit, endbit;
  int intregs, intoffset;

  /* Handle the situations where a float is taking up the first half
     of the GPR, and the other half is empty (typically due to
     alignment restrictions).  We can detect this by an 8-byte-aligned
     int field, or by seeing that this is the final flush for this
     argument.  Count the word and continue on.  */
  if (cum->floats_in_gpr == 1
      && (cum->intoffset % 64 == 0
	  || (cum->intoffset == -1 && final)))
    {
      cum->words++;
      cum->floats_in_gpr = 0;
    }

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;
  cum->floats_in_gpr = 0;

  if (intoffset % BITS_PER_WORD != 0)
    {
      unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
      if (!int_mode_for_size (bits, 0).exists ())
	{
	  /* We couldn't find an appropriate mode, which happens,
	     e.g., in packed structs when there are 3 bytes to load.
	     Back intoffset back to the beginning of the word in this
	     case.  */
	  intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
	}
    }

  startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
  endbit = ROUND_UP (bitpos, BITS_PER_WORD);
  intregs = (endbit - startbit) / BITS_PER_WORD;
  cum->words += intregs;
  /* words should be unsigned.  */
  if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
    {
      int pad = (endbit / BITS_PER_WORD) - cum->words;
      cum->words += pad;
    }
}

/* The darwin64 ABI calls for us to recurse down through structs,
   looking for elements passed in registers.  Unfortunately, we have
   to track int register count here also because of misalignments
   in powerpc alignment mode.  */

static void
rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
					    const_tree type,
					    HOST_WIDE_INT startbitpos)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;
	tree ftype = TREE_TYPE (f);
	machine_mode mode;
	if (ftype == error_mark_node)
	  continue;
	mode = TYPE_MODE (ftype);

	if (DECL_SIZE (f) != 0
	    && tree_fits_uhwi_p (bit_position (f)))
	  bitpos += int_bit_position (f);

	/* ??? FIXME: else assume zero offset.  */

	if (TREE_CODE (ftype) == RECORD_TYPE)
	  rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
	else if (USE_FP_FOR_ARG_P (cum, mode))
	  {
	    unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
	    rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
	    cum->fregno += n_fpregs;
	    /* Single-precision floats present a special problem for
	       us, because they are smaller than an 8-byte GPR, and so
	       the structure-packing rules combined with the standard
	       varargs behavior mean that we want to pack float/float
	       and float/int combinations into a single register's
	       space.  This is complicated by the arg advance flushing,
	       which works on arbitrarily large groups of int-type
	       fields.  */
	    if (mode == SFmode)
	      {
		if (cum->floats_in_gpr == 1)
		  {
		    /* Two floats in a word; count the word and reset
		       the float count.  */
		    cum->words++;
		    cum->floats_in_gpr = 0;
		  }
		else if (bitpos % 64 == 0)
		  {
		    /* A float at the beginning of an 8-byte word;
		       count it and put off adjusting cum->words until
		       we see if an arg advance flush is going to do it
		       for us.  */
		    cum->floats_in_gpr++;
		  }
		else
		  {
		    /* The float is at the end of a word, preceded
		       by integer fields, so the arg advance flush
		       just above has already set cum->words and
		       everything is taken care of.  */
		  }
	      }
	    else
	      cum->words += n_fpregs;
	  }
	else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
	  {
	    rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
	    cum->vregno++;
	    cum->words += 2;
	  }
	else if (cum->intoffset == -1)
	  cum->intoffset = bitpos;
      }
}
/* Check for an item that needs to be considered specially under the darwin 64
   bit ABI.  These are record types where the mode is BLK or the structure is
   8 bytes in size.  */
static int
rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
{
  return rs6000_darwin64_abi
	 && ((mode == BLKmode
	      && TREE_CODE (type) == RECORD_TYPE
	      && int_size_in_bytes (type) > 0)
	     || (type && TREE_CODE (type) == RECORD_TYPE
		 && int_size_in_bytes (type) == 8)) ? 1 : 0;
}
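
/* Editor-added illustration: struct { float f; int i; } is exactly 8
   bytes, so it takes the special darwin64 path even when it is given a
   non-BLK mode, whereas a 12-byte record qualifies only through the
   BLKmode test above.  */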
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static void
rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
			       const_tree type, bool named, int depth)
{
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  /* Only tick off an argument if we're not recursing.  */
  if (depth == 0)
    cum->nargs_prototype--;

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
      && cum->escapes)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  rs6000_passes_float = true;
	  if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
	      && (FLOAT128_IBM_P (mode)
		  || FLOAT128_IEEE_P (mode)
		  || (type != NULL
		      && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
	    rs6000_passes_long_double = true;
	}
      if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
	  || (PAIRED_VECTOR_MODE (mode)
	      && !cum->stdarg
	      && cum->sysv_gregno <= GP_ARG_MAX_REG))
	rs6000_passes_vector = true;
    }
#endif

  if (TARGET_ALTIVEC_ABI
      && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
	  || (type && TREE_CODE (type) == VECTOR_TYPE
	      && int_size_in_bytes (type) == 16)))
    {
      bool stack = false;

      if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
	{
	  cum->vregno += n_elts;

	  if (!TARGET_ALTIVEC)
	    error ("cannot pass argument in vector register because"
		   " altivec instructions are disabled, use %qs"
		   " to enable them", "-maltivec");

	  /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
	     even if it is going to be passed in a vector register.
	     Darwin does the same for variable-argument functions.  */
	  if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	       && TARGET_64BIT)
	      || (cum->stdarg && DEFAULT_ABI != ABI_V4))
	    stack = true;
	}
      else
	stack = true;

      if (stack)
	{
	  int align;

	  /* Vector parameters must be 16-byte aligned.  In 32-bit
	     mode this means we need to take into account the offset
	     to the parameter save area.  In 64-bit mode, they just
	     have to start on an even word, since the parameter save
	     area is 16-byte aligned.  */
	  if (TARGET_32BIT)
	    align = -(rs6000_parm_offset () + cum->words) & 3;
	  else
	    align = cum->words & 1;
	  cum->words += align + rs6000_arg_size (mode, type);

	  if (TARGET_DEBUG_ARG)
	    {
	      fprintf (stderr, "function_adv: words = %2d, align=%d, ",
		       cum->words, align);
	      fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
		       cum->nargs_prototype, cum->prototype,
		       GET_MODE_NAME (mode));
	    }
	}
    }
  else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      int size = int_size_in_bytes (type);
      /* Variable sized types have size == -1 and are
	 treated as if consisting entirely of ints.
	 Pad to 16 byte boundary if needed.  */
      if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
	  && (cum->words % 2) != 0)
	cum->words++;
      /* For varargs, we can just go up by the size of the struct.  */
      if (!named)
	cum->words += (size + 7) / 8;
      else
	{
	  /* It is tempting to say int register count just goes up by
	     sizeof(type)/8, but this is wrong in a case such as
	     { int; double; int; } [powerpc alignment].  We have to
	     grovel through the fields for these too.  */
	  cum->intoffset = 0;
	  cum->floats_in_gpr = 0;
	  rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
	  rs6000_darwin64_record_arg_advance_flush (cum,
						    size * BITS_PER_UNIT, 1);
	}
      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
		   cum->words, TYPE_ALIGN (type), size);
	  fprintf (stderr,
		   "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
		   cum->nargs_prototype, cum->prototype,
		   GET_MODE_NAME (mode));
	}
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (abi_v4_pass_in_fpr (mode))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
	  else
	    {
	      cum->fregno = FP_ARG_V4_MAX_REG + 1;
	      if (mode == DFmode || FLOAT128_IBM_P (mode)
		  || mode == DDmode || mode == TDmode)
		cum->words += cum->words & 1;
	      cum->words += rs6000_arg_size (mode, type);
	    }
	}
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
	     As does any other 2 word item such as complex int due to a
	     historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;

	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    {
	      /* Long long is aligned on the stack.  So are other 2 word
		 items such as complex int due to a historical mistake.  */
	      if (n_words == 2)
		cum->words += cum->words & 1;
	      cum->words += n_words;
	    }

	  /* Note: we keep accumulating gregno even after we have started
	     spilling to the stack, so that expand_builtin_saveregs can
	     tell that spilling has started.  */
	  cum->sysv_gregno = gregno + n_words;
	}

      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
		   cum->words, cum->fregno);
	  fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
		   cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
	  fprintf (stderr, "mode = %4s, named = %d\n",
		   GET_MODE_NAME (mode), named);
	}
    }
  else
    {
      int n_words = rs6000_arg_size (mode, type);
      int start_words = cum->words;
      int align_words = rs6000_parm_start (mode, type, start_words);

      cum->words = align_words + n_words;

      if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
	{
	  /* _Decimal128 must be passed in an even/odd float register pair.
	     This assumes that the register number is odd when fregno is
	     odd.  */
	  if (elt_mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;
	  cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
	}

      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
		   cum->words, cum->fregno);
	  fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
		   cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
	  fprintf (stderr, "named = %d, align = %d, depth = %d\n",
		   named, align_words - start_words, depth);
	}
    }
}
static void
rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
			     const_tree type, bool named)
{
  rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
				 0);
}

/* A subroutine of rs6000_darwin64_record_arg.  Assign the bits of the
   structure between cum->intoffset and bitpos to integer registers.  */

static void
rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
				  HOST_WIDE_INT bitpos, rtx rvec[], int *k)
{
  machine_mode mode;
  unsigned int regno;
  unsigned int startbit, endbit;
  int this_regno, intregs, intoffset;
  rtx reg;

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;

  /* If this is the trailing part of a word, try to only load that
     much into the register.  Otherwise load the whole register.  Note
     that in the latter case we may pick up unwanted bits.  It's not a
     problem at the moment but we may wish to revisit this.  */

  if (intoffset % BITS_PER_WORD != 0)
    {
      unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
      if (!int_mode_for_size (bits, 0).exists (&mode))
	{
	  /* We couldn't find an appropriate mode, which happens,
	     e.g., in packed structs when there are 3 bytes to load.
	     Back intoffset back to the beginning of the word in this
	     case.  */
	  intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
	  mode = word_mode;
	}
    }
  else
    mode = word_mode;

  startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
  endbit = ROUND_UP (bitpos, BITS_PER_WORD);
  intregs = (endbit - startbit) / BITS_PER_WORD;
  this_regno = cum->words + intoffset / BITS_PER_WORD;

  if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
    cum->use_stack = 1;

  intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
  if (intregs <= 0)
    return;

  intoffset /= BITS_PER_UNIT;
  do
    {
      regno = GP_ARG_MIN_REG + this_regno;
      reg = gen_rtx_REG (mode, regno);
      rvec[(*k)++] =
	gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));

      this_regno += 1;
      intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
      mode = word_mode;
      intregs -= 1;
    }
  while (intregs > 0);
}
/* Recursive workhorse for the following.  */

static void
rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
				    HOST_WIDE_INT startbitpos, rtx rvec[],
				    int *k)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;
	tree ftype = TREE_TYPE (f);
	machine_mode mode;
	if (ftype == error_mark_node)
	  continue;
	mode = TYPE_MODE (ftype);

	if (DECL_SIZE (f) != 0
	    && tree_fits_uhwi_p (bit_position (f)))
	  bitpos += int_bit_position (f);

	/* ??? FIXME: else assume zero offset.  */

	if (TREE_CODE (ftype) == RECORD_TYPE)
	  rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
	else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
	  {
	    unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
#if 0
	    switch (mode)
	      {
	      case E_SCmode: mode = SFmode; break;
	      case E_DCmode: mode = DFmode; break;
	      case E_TCmode: mode = TFmode; break;
	      default: break;
	      }
#endif
	    rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
	    if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
	      {
		gcc_assert (cum->fregno == FP_ARG_MAX_REG
			    && (mode == TFmode || mode == TDmode));
		/* Long double or _Decimal128 split over regs and memory.  */
		mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
		cum->use_stack = 1;
	      }
	    rvec[(*k)++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode, cum->fregno++),
				   GEN_INT (bitpos / BITS_PER_UNIT));
	    if (FLOAT128_2REG_P (mode))
	      cum->fregno++;
	  }
	else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
	  {
	    rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
	    rvec[(*k)++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode, cum->vregno++),
				   GEN_INT (bitpos / BITS_PER_UNIT));
	  }
	else if (cum->intoffset == -1)
	  cum->intoffset = bitpos;
      }
}

/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
   the register(s) to be used for each field and subfield of a struct
   being passed by value, along with the offset of where the
   register's value may be found in the block.  FP fields go in FP
   register, vector fields go in vector registers, and everything
   else goes in int registers, packed as in memory.

   This code is also used for function return values.  RETVAL indicates
   whether this is the case.

   Much of this is taken from the SPARC V9 port, which has a similar
   calling convention.  */

static rtx
rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
			    bool named, bool retval)
{
  rtx rvec[FIRST_PSEUDO_REGISTER];
  int k = 1, kbase = 1;
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
  /* This is a copy; modifications are not visible to our caller.  */
  CUMULATIVE_ARGS copy_cum = *orig_cum;
  CUMULATIVE_ARGS *cum = &copy_cum;

  /* Pad to 16 byte boundary if needed.  */
  if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
      && (cum->words % 2) != 0)
    cum->words++;

  cum->intoffset = 0;
  cum->use_stack = 0;
  cum->named = named;

  /* Put entries into rvec[] for individual FP and vector fields, and
     for the chunks of memory that go in int regs.  Note we start at
     element 1; 0 is reserved for an indication of using memory, and
     may or may not be filled in below.  */
  rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
  rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);

  /* If any part of the struct went on the stack put all of it there.
     This hack is because the generic code for
     FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
     parts of the struct are not at the beginning.  */
  if (cum->use_stack)
    {
      if (retval)
	return NULL_RTX;    /* doesn't go in registers at all  */
      kbase = 0;
      rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }
  if (k > 1 || cum->use_stack)
    return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
  else
    return NULL_RTX;
}
/* Determine where to place an argument in 64-bit mode with 32-bit ABI.  */

static rtx
rs6000_mixed_function_arg (machine_mode mode, const_tree type,
			   int align_words)
{
  int n_units;
  int i, k;
  rtx rvec[GP_ARG_NUM_REG + 1];

  if (align_words >= GP_ARG_NUM_REG)
    return NULL_RTX;

  n_units = rs6000_arg_size (mode, type);

  /* Optimize the simple case where the arg fits in one gpr, except in
     the case of BLKmode due to assign_parms assuming that registers are
     BITS_PER_WORD wide.  */
  if (n_units == 0
      || (n_units == 1 && mode != BLKmode))
    return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);

  k = 0;
  if (align_words + n_units > GP_ARG_NUM_REG)
    /* Not all of the arg fits in gprs.  Say that it goes in memory too,
       using a magic NULL_RTX component.
       This is not strictly correct.  Only some of the arg belongs in
       memory, not all of it.  However, the normal scheme using
       function_arg_partial_nregs can result in unusual subregs, eg.
       (subreg:SI (reg:DF) 4), which are not handled well.  The code to
       store the whole arg to memory is often more efficient than code
       to store pieces, and we know that space is available in the right
       place for the whole arg.  */
    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  i = 0;
  do
    {
      rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
      rtx off = GEN_INT (i++ * 4);
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
    }
  while (++align_words < GP_ARG_NUM_REG && --n_units != 0);

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
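
/* Editor-added illustration: with -m32 -mpowerpc64, a DFmode argument at
   ALIGN_WORDS == 7 needs two SImode units but only one GPR (r10)
   remains, so the result is a PARALLEL of a NULL_RTX memory marker plus
   (reg:SI r10) at offset 0; the first word travels in r10 and the full
   value is also stored to the parameter save area.  */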
/* We have an argument of MODE and TYPE that goes into FPRs or VRs,
   but must also be copied into the parameter save area starting at
   offset ALIGN_WORDS.  Fill in RVEC with the elements corresponding
   to the GPRs and/or memory.  Return the number of elements used.  */

static int
rs6000_psave_function_arg (machine_mode mode, const_tree type,
			   int align_words, rtx *rvec)
{
  int k = 0;

  if (align_words < GP_ARG_NUM_REG)
    {
      int n_words = rs6000_arg_size (mode, type);

      if (align_words + n_words > GP_ARG_NUM_REG
	  || mode == BLKmode
	  || (TARGET_32BIT && TARGET_POWERPC64))
	{
	  /* If this is partially on the stack, then we only
	     include the portion actually in registers here.  */
	  machine_mode rmode = TARGET_32BIT ? SImode : DImode;
	  int i = 0;

	  if (align_words + n_words > GP_ARG_NUM_REG)
	    {
	      /* Not all of the arg fits in gprs.  Say that it goes in memory
		 too, using a magic NULL_RTX component.  Also see comment in
		 rs6000_mixed_function_arg for why the normal
		 function_arg_partial_nregs scheme doesn't work in this case.  */
	      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
	    }

	  do
	    {
	      rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
	      rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
	      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
	    }
	  while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
	}
      else
	{
	  /* The whole arg fits in gprs.  */
	  rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
	}
    }
  else
    {
      /* It's entirely in memory.  */
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }

  return k;
}

/* RVEC is a vector of K components of an argument of mode MODE.
   Construct the final function_arg return value from it.  */

static rtx
rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
{
  gcc_assert (k >= 1);

  /* Avoid returning a PARALLEL in the trivial cases.  */
  if (k == 1)
    {
      if (XEXP (rvec[0], 0) == NULL_RTX)
	return NULL_RTX;

      if (GET_MODE (XEXP (rvec[0], 0)) == mode)
	return XEXP (rvec[0], 0);
    }

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}

/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.  It is
    not modified in this routine.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On RS/6000 the first eight words of non-FP are normally in registers
   and the rest are pushed.  Under AIX, the first 13 FP args are in registers.
   Under V.4, the first 8 FP args are in registers.

   If this is floating-point and no prototype is specified, we use
   both an FP and integer register (or possibly FP reg and stack).  Library
   functions (when CALL_LIBCALL is set) always have the proper types for args,
   so we can pass the FP value just in one register.  emit_library_function
   doesn't support PARALLEL anyway.

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */
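
/* Editor-added illustration: for a prototyped call under the 64-bit
   AIX/ELFv2 rules, the first named int argument lands in r3 and the
   first named double in f1; an unprototyped double is instead described
   by a PARALLEL naming both f1 and the corresponding GPR/memory slot,
   since the callee may expect it in either place.  */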
static rtx
rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
		     const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  enum rs6000_abi abi = DEFAULT_ABI;
  machine_mode elt_mode;
  int n_elts;

  /* Return a marker to indicate whether CR1 needs to set or clear the
     bit that V.4 uses to say fp args were passed in registers.
     Assume that we don't need the marker for software floating point,
     or compiler generated library calls.  */
  if (mode == VOIDmode)
    {
      if (abi == ABI_V4
	  && (cum->call_cookie & CALL_LIBCALL) == 0
	  && (cum->stdarg
	      || (cum->nargs_prototype < 0
		  && (cum->prototype || TARGET_NO_PROTOTYPE)))
	  && TARGET_HARD_FLOAT)
	return GEN_INT (cum->call_cookie
			| ((cum->fregno == FP_ARG_MIN_REG)
			   ? CALL_V4_SET_FP_ARGS
			   : CALL_V4_CLEAR_FP_ARGS));

      return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
    }

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
      if (rslt != NULL_RTX)
	return rslt;
      /* Else fall through to usual handling.  */
    }

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
      rtx r, off;
      int i, k = 0;

      /* Do we also need to pass this argument in the parameter save area?
	 Library support functions for IEEE 128-bit are assumed to not need the
	 value passed both in GPRs and in vector registers.  */
      if (TARGET_64BIT && !cum->prototype
	  && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
	{
	  int align_words = ROUND_UP (cum->words, 2);
	  k = rs6000_psave_function_arg (mode, type, align_words, rvec);
	}

      /* Describe where this argument goes in the vector registers.  */
      for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
	{
	  r = gen_rtx_REG (elt_mode, cum->vregno + i);
	  off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
	  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
	}

      return rs6000_finish_function_arg (mode, rvec, k);
    }
  else if (TARGET_ALTIVEC_ABI
	   && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
	       || (type && TREE_CODE (type) == VECTOR_TYPE
		   && int_size_in_bytes (type) == 16)))
    {
      if (named || abi == ABI_V4)
	return NULL_RTX;
      else
	{
	  /* Vector parameters to varargs functions under AIX or Darwin
	     get passed in memory and possibly also in GPRs.  */
	  int align, align_words, n_words;
	  machine_mode part_mode;

	  /* Vector parameters must be 16-byte aligned.  In 32-bit
	     mode this means we need to take into account the offset
	     to the parameter save area.  In 64-bit mode, they just
	     have to start on an even word, since the parameter save
	     area is 16-byte aligned.  */
	  if (TARGET_32BIT)
	    align = -(rs6000_parm_offset () + cum->words) & 3;
	  else
	    align = cum->words & 1;
	  align_words = cum->words + align;

	  /* Out of registers?  Memory, then.  */
	  if (align_words >= GP_ARG_NUM_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  /* The vector value goes in GPRs.  Only the part of the
	     value in GPRs is reported here.  */
	  part_mode = mode;
	  n_words = rs6000_arg_size (mode, type);
	  if (align_words + n_words > GP_ARG_NUM_REG)
	    /* Fortunately, there are only two possibilities, the value
	       is either wholly in GPRs or half in GPRs and half not.  */
	    part_mode = DImode;

	  return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
	}
    }

  else if (abi == ABI_V4)
    {
      if (abi_v4_pass_in_fpr (mode))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    return gen_rtx_REG (mode, cum->fregno);
	  else
	    return NULL_RTX;
	}
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
	     As does any other 2 word item such as complex int due to a
	     historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;

	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type,
					      gregno - GP_ARG_MIN_REG);
	  return gen_rtx_REG (mode, gregno);
	}
    }
  else
    {
      int align_words = rs6000_parm_start (mode, type, cum->words);

      /* _Decimal128 must be passed in an even/odd float register pair.
	 This assumes that the register number is odd when fregno is odd.  */
      if (elt_mode == TDmode && (cum->fregno % 2) == 1)
	cum->fregno++;

      if (USE_FP_FOR_ARG_P (cum, elt_mode))
	{
	  rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
	  rtx r, off;
	  int i, k = 0;
	  unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
	  int fpr_words;

	  /* Do we also need to pass this argument in the parameter
	     save area?  */
	  if (type && (cum->nargs_prototype <= 0
		       || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
			   && TARGET_XL_COMPAT
			   && align_words >= GP_ARG_NUM_REG)))
	    k = rs6000_psave_function_arg (mode, type, align_words, rvec);

	  /* Describe where this argument goes in the fprs.  */
	  for (i = 0; i < n_elts
	       && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
	    {
	      /* Check if the argument is split over registers and memory.
		 This can only ever happen for long double or _Decimal128;
		 complex types are handled via split_complex_arg.  */
	      machine_mode fmode = elt_mode;
	      if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
		{
		  gcc_assert (FLOAT128_2REG_P (fmode));
		  fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
		}

	      r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
	      off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
	      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
	    }

	  /* If there were not enough FPRs to hold the argument, the rest
	     usually goes into memory.  However, if the current position
	     is still within the register parameter area, a portion may
	     actually have to go into GPRs.

	     Note that it may happen that the portion of the argument
	     passed in the first "half" of the first GPR was already
	     passed in the last FPR as well.

	     For unnamed arguments, we already set up GPRs to cover the
	     whole argument in rs6000_psave_function_arg, so there is
	     nothing further to do at this point.  */
	  fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
	  if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
	      && cum->nargs_prototype > 0)
	    {
	      static bool warned;

	      machine_mode rmode = TARGET_32BIT ? SImode : DImode;
	      int n_words = rs6000_arg_size (mode, type);

	      align_words += fpr_words;
	      n_words -= fpr_words;

	      do
		{
		  r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
		  off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
		  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
		}
	      while (++align_words < GP_ARG_NUM_REG && --n_words != 0);

	      if (!warned && warn_psabi)
		{
		  warned = true;
		  inform (input_location,
			  "the ABI of passing homogeneous float aggregates"
			  " has changed in GCC 5");
		}
	    }

	  return rs6000_finish_function_arg (mode, rvec, k);
	}
      else if (align_words < GP_ARG_NUM_REG)
	{
	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	}
      else
	return NULL_RTX;
    }
}

/* For an arg passed partly in registers and partly in memory, this is
   the number of bytes passed in registers.  For args passed entirely in
   registers or entirely in memory, zero.  When an arg is described by a
   PARALLEL, perhaps using more than one register type, this function
   returns the number of bytes used by the first element of the PARALLEL.  */

static int
rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
			  tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  bool passed_in_gprs = true;
  int ret = 0;
  int align_words;
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (DEFAULT_ABI == ABI_V4)
    return 0;

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      /* If we are passing this arg in the fixed parameter save area (gprs or
	 memory) as well as VRs, we do not use the partial bytes mechanism;
	 instead, rs6000_function_arg will return a PARALLEL including a memory
	 element as necessary.  Library support functions for IEEE 128-bit are
	 assumed to not need the value passed both in GPRs and in vector
	 registers.  */
      if (TARGET_64BIT && !cum->prototype
	  && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
	return 0;

      /* Otherwise, we pass in VRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
	ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
    }

  /* In this complicated case we just disable the partial_nregs code.  */
  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    return 0;

  align_words = rs6000_parm_start (mode, type, cum->words);

  if (USE_FP_FOR_ARG_P (cum, elt_mode))
    {
      unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;

      /* If we are passing this arg in the fixed parameter save area
	 (gprs or memory) as well as FPRs, we do not use the partial
	 bytes mechanism; instead, rs6000_function_arg will return a
	 PARALLEL including a memory element as necessary.  */
      if (type
	  && (cum->nargs_prototype <= 0
	      || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
		  && TARGET_XL_COMPAT
		  && align_words >= GP_ARG_NUM_REG)))
	return 0;

      /* Otherwise, we pass in FPRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
	{
	  /* Compute number of bytes / words passed in FPRs.  If there
	     is still space available in the register parameter area
	     *after* that amount, a part of the argument will be passed
	     in GPRs.  In that case, the total amount passed in any
	     registers is equal to the amount that would have been passed
	     in GPRs if everything were passed there, so we fall back to
	     the GPR code below to compute the appropriate value.  */
	  int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
		     * MIN (8, GET_MODE_SIZE (elt_mode)));
	  int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);

	  if (align_words + fpr_words < GP_ARG_NUM_REG)
	    passed_in_gprs = true;
	  else
	    ret = fpr;
	}
    }

  if (passed_in_gprs
      && align_words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
    ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);

  if (ret != 0 && TARGET_DEBUG_ARG)
    fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);

  return ret;
}

/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, aggregates and long double are passed by reference.

   As an extension to all 32-bit ABIs, AltiVec vectors are passed by
   reference unless the AltiVec vector extension ABI is in force.

   As an extension to all ABIs, variable sized types are passed by
   reference.  */
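
/* Editor-added illustration: under the 32-bit SVR4 (V.4) ABI, passing
   struct S { int a[4]; } really passes a pointer to a caller-made copy
   of the struct in a GPR, and the same happens for any variably sized
   type on every ABI.  */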
12719 static bool
12720 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12721 machine_mode mode, const_tree type,
12722 bool named ATTRIBUTE_UNUSED)
12724 if (!type)
12725 return 0;
12727 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12728 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12730 if (TARGET_DEBUG_ARG)
12731 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12732 return 1;
12735 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12737 if (TARGET_DEBUG_ARG)
12738 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12739 return 1;
12742 if (int_size_in_bytes (type) < 0)
12744 if (TARGET_DEBUG_ARG)
12745 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12746 return 1;
12749 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12750 modes only exist for GCC vector types if -maltivec. */
12751 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12753 if (TARGET_DEBUG_ARG)
12754 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12755 return 1;
12758 /* Pass synthetic vectors in memory. */
12759 if (TREE_CODE (type) == VECTOR_TYPE
12760 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12762 static bool warned_for_pass_big_vectors = false;
12763 if (TARGET_DEBUG_ARG)
12764 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12765 if (!warned_for_pass_big_vectors)
12767 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12768 "non-standard ABI extension with no compatibility "
12769 "guarantee");
12770 warned_for_pass_big_vectors = true;
12772 return 1;
12775 return 0;
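/* A small illustration of the rules above (hypothetical declaration):

     struct S { int a[4]; };
     void f (struct S s);   // ABI_V4: passed by reference (aggregate)
                            // AIX/ELFv2: passed by value in GPRs/memory

   Variable-sized types (int_size_in_bytes < 0) and, on 32-bit targets
   without the AltiVec ABI, vector arguments take the same reference
   path, as coded above.  */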
12778 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12779 already processed. Return true if the parameter must be passed
12780 (fully or partially) on the stack. */
12782 static bool
12783 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12785 machine_mode mode;
12786 int unsignedp;
12787 rtx entry_parm;
12789 /* Catch errors. */
12790 if (type == NULL || type == error_mark_node)
12791 return true;
12793 /* Handle types with no storage requirement. */
12794 if (TYPE_MODE (type) == VOIDmode)
12795 return false;
12797 /* Handle complex types: check both components; each recursive call
12798 also advances ARGS_SO_FAR past one component. */
12798 if (TREE_CODE (type) == COMPLEX_TYPE)
12799 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12800 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12802 /* Handle transparent aggregates. */
12803 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12804 && TYPE_TRANSPARENT_AGGR (type))
12805 type = TREE_TYPE (first_field (type));
12807 /* See if this arg was passed by invisible reference. */
12808 if (pass_by_reference (get_cumulative_args (args_so_far),
12809 TYPE_MODE (type), type, true))
12810 type = build_pointer_type (type);
12812 /* Find mode as it is passed by the ABI. */
12813 unsignedp = TYPE_UNSIGNED (type);
12814 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12816 /* If we must pass it on the stack, we need a stack. */
12817 if (rs6000_must_pass_in_stack (mode, type))
12818 return true;
12820 /* If there is no incoming register, we need a stack. */
12821 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12822 if (entry_parm == NULL)
12823 return true;
12825 /* Likewise if we need to pass both in registers and on the stack. */
12826 if (GET_CODE (entry_parm) == PARALLEL
12827 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12828 return true;
12830 /* Also true if we're partially in registers and partially not. */
12831 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12832 return true;
12834 /* Update info on where next arg arrives in registers. */
12835 rs6000_function_arg_advance (args_so_far, mode, type, true);
12836 return false;
12839 /* Return true if FUN has no prototype, has a variable argument
12840 list, or passes any parameter in memory. */
12842 static bool
12843 rs6000_function_parms_need_stack (tree fun, bool incoming)
12845 tree fntype, result;
12846 CUMULATIVE_ARGS args_so_far_v;
12847 cumulative_args_t args_so_far;
12849 if (!fun)
12850 /* Must be a libcall, all of which only use reg parms. */
12851 return false;
12853 fntype = fun;
12854 if (!TYPE_P (fun))
12855 fntype = TREE_TYPE (fun);
12857 /* Varargs functions need the parameter save area. */
12858 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12859 return true;
12861 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12862 args_so_far = pack_cumulative_args (&args_so_far_v);
12864 /* When incoming, we will have been passed the function decl.
12865 It is necessary to use the decl to handle K&R style functions,
12866 where TYPE_ARG_TYPES may not be available. */
12867 if (incoming)
12869 gcc_assert (DECL_P (fun));
12870 result = DECL_RESULT (fun);
12872 else
12873 result = TREE_TYPE (fntype);
12875 if (result && aggregate_value_p (result, fntype))
12877 if (!TYPE_P (result))
12878 result = TREE_TYPE (result);
12879 result = build_pointer_type (result);
12880 rs6000_parm_needs_stack (args_so_far, result);
12883 if (incoming)
12885 tree parm;
12887 for (parm = DECL_ARGUMENTS (fun);
12888 parm && parm != void_list_node;
12889 parm = TREE_CHAIN (parm))
12890 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12891 return true;
12893 else
12895 function_args_iterator args_iter;
12896 tree arg_type;
12898 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12899 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12900 return true;
12903 return false;
12906 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12907 usually a constant depending on the ABI. However, in the ELFv2 ABI
12908 the register parameter area is optional when calling a function that
12909 has a prototype in scope, has no variable argument list, and passes
12910 all parameters in registers. */
12912 int
12913 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12915 int reg_parm_stack_space;
12917 switch (DEFAULT_ABI)
12919 default:
12920 reg_parm_stack_space = 0;
12921 break;
12923 case ABI_AIX:
12924 case ABI_DARWIN:
12925 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12926 break;
12928 case ABI_ELFv2:
12929 /* ??? Recomputing this every time is a bit expensive. Is there
12930 a place to cache this information? */
12931 if (rs6000_function_parms_need_stack (fun, incoming))
12932 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12933 else
12934 reg_parm_stack_space = 0;
12935 break;
12938 return reg_parm_stack_space;
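/* For illustration: under ABI_AIX the caller always reserves the full
   parameter save area (64 bytes with -m64, 32 with -m32), while under
   ABI_ELFv2 a call to a prototyped, non-varargs function whose
   arguments all fit in registers reserves no space at all, per the
   test above.  */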
12941 static void
12942 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12944 int i;
12945 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12947 if (nregs == 0)
12948 return;
12950 for (i = 0; i < nregs; i++)
12952 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12953 if (reload_completed)
12955 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12956 tem = NULL_RTX;
12957 else
12958 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12959 i * GET_MODE_SIZE (reg_mode));
12961 else
12962 tem = replace_equiv_address (tem, XEXP (tem, 0));
12964 gcc_assert (tem);
12966 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12970 /* Perform any actions needed for a function that is receiving a
12971 variable number of arguments.
12973 CUM is as above.
12975 MODE and TYPE are the mode and type of the current parameter.
12977 PRETEND_SIZE is a variable that should be set to the amount of stack
12978 that must be pushed by the prolog to pretend that our caller pushed
12981 Normally, this macro will push all remaining incoming registers on the
12982 stack and set PRETEND_SIZE to the length of the registers pushed. */
12984 static void
12985 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12986 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12987 int no_rtl)
12989 CUMULATIVE_ARGS next_cum;
12990 int reg_size = TARGET_32BIT ? 4 : 8;
12991 rtx save_area = NULL_RTX, mem;
12992 int first_reg_offset;
12993 alias_set_type set;
12995 /* Skip the last named argument. */
12996 next_cum = *get_cumulative_args (cum);
12997 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12999 if (DEFAULT_ABI == ABI_V4)
13001 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
13003 if (! no_rtl)
13005 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
13006 HOST_WIDE_INT offset = 0;
13008 /* Try to optimize the size of the varargs save area.
13009 The ABI requires that ap.reg_save_area is doubleword
13010 aligned, but we don't need to allocate space for all
13011 the bytes, only for those to which we will actually save
13012 anything. */
13013 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
13014 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
13015 if (TARGET_HARD_FLOAT
13016 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13017 && cfun->va_list_fpr_size)
13019 if (gpr_reg_num)
13020 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
13021 * UNITS_PER_FP_WORD;
13022 if (cfun->va_list_fpr_size
13023 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13024 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
13025 else
13026 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13027 * UNITS_PER_FP_WORD;
13029 if (gpr_reg_num)
13031 offset = -((first_reg_offset * reg_size) & ~7);
13032 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
13034 gpr_reg_num = cfun->va_list_gpr_size;
13035 if (reg_size == 4 && (first_reg_offset & 1))
13036 gpr_reg_num++;
13038 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
13040 else if (fpr_size)
13041 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
13042 * UNITS_PER_FP_WORD
13043 - (int) (GP_ARG_NUM_REG * reg_size);
13045 if (gpr_size + fpr_size)
13047 rtx reg_save_area
13048 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
13049 gcc_assert (GET_CODE (reg_save_area) == MEM);
13050 reg_save_area = XEXP (reg_save_area, 0);
13051 if (GET_CODE (reg_save_area) == PLUS)
13053 gcc_assert (XEXP (reg_save_area, 0)
13054 == virtual_stack_vars_rtx);
13055 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
13056 offset += INTVAL (XEXP (reg_save_area, 1));
13058 else
13059 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
13062 cfun->machine->varargs_save_offset = offset;
13063 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
13066 else
13068 first_reg_offset = next_cum.words;
13069 save_area = crtl->args.internal_arg_pointer;
13071 if (targetm.calls.must_pass_in_stack (mode, type))
13072 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
13075 set = get_varargs_alias_set ();
13076 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
13077 && cfun->va_list_gpr_size)
13079 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
13081 if (va_list_gpr_counter_field)
13082 /* V4 va_list_gpr_size counts number of registers needed. */
13083 n_gpr = cfun->va_list_gpr_size;
13084 else
13085 /* char * va_list instead counts number of bytes needed. */
13086 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
13088 if (nregs > n_gpr)
13089 nregs = n_gpr;
13091 mem = gen_rtx_MEM (BLKmode,
13092 plus_constant (Pmode, save_area,
13093 first_reg_offset * reg_size));
13094 MEM_NOTRAP_P (mem) = 1;
13095 set_mem_alias_set (mem, set);
13096 set_mem_align (mem, BITS_PER_WORD);
13098 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
13099 nregs);
13102 /* Save FP registers if needed. */
13103 if (DEFAULT_ABI == ABI_V4
13104 && TARGET_HARD_FLOAT
13105 && ! no_rtl
13106 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13107 && cfun->va_list_fpr_size)
13109 int fregno = next_cum.fregno, nregs;
13110 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
13111 rtx lab = gen_label_rtx ();
13112 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
13113 * UNITS_PER_FP_WORD);
13115 emit_jump_insn
13116 (gen_rtx_SET (pc_rtx,
13117 gen_rtx_IF_THEN_ELSE (VOIDmode,
13118 gen_rtx_NE (VOIDmode, cr1,
13119 const0_rtx),
13120 gen_rtx_LABEL_REF (VOIDmode, lab),
13121 pc_rtx)));
13123 for (nregs = 0;
13124 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
13125 fregno++, off += UNITS_PER_FP_WORD, nregs++)
13127 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13128 ? DFmode : SFmode,
13129 plus_constant (Pmode, save_area, off));
13130 MEM_NOTRAP_P (mem) = 1;
13131 set_mem_alias_set (mem, set);
13132 set_mem_align (mem, GET_MODE_ALIGNMENT (
13133 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13134 ? DFmode : SFmode));
13135 emit_move_insn (mem, gen_rtx_REG (
13136 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13137 ? DFmode : SFmode, fregno));
13140 emit_label (lab);
13144 /* Create the va_list data type. */
13146 static tree
13147 rs6000_build_builtin_va_list (void)
13149 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
13151 /* For AIX, prefer 'char *' because that's what the system
13152 header files like. */
13153 if (DEFAULT_ABI != ABI_V4)
13154 return build_pointer_type (char_type_node);
13156 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
13157 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
13158 get_identifier ("__va_list_tag"), record);
13160 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
13161 unsigned_char_type_node);
13162 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
13163 unsigned_char_type_node);
13164 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
13165 every user file. */
13166 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13167 get_identifier ("reserved"), short_unsigned_type_node);
13168 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13169 get_identifier ("overflow_arg_area"),
13170 ptr_type_node);
13171 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13172 get_identifier ("reg_save_area"),
13173 ptr_type_node);
13175 va_list_gpr_counter_field = f_gpr;
13176 va_list_fpr_counter_field = f_fpr;
13178 DECL_FIELD_CONTEXT (f_gpr) = record;
13179 DECL_FIELD_CONTEXT (f_fpr) = record;
13180 DECL_FIELD_CONTEXT (f_res) = record;
13181 DECL_FIELD_CONTEXT (f_ovf) = record;
13182 DECL_FIELD_CONTEXT (f_sav) = record;
13184 TYPE_STUB_DECL (record) = type_decl;
13185 TYPE_NAME (record) = type_decl;
13186 TYPE_FIELDS (record) = f_gpr;
13187 DECL_CHAIN (f_gpr) = f_fpr;
13188 DECL_CHAIN (f_fpr) = f_res;
13189 DECL_CHAIN (f_res) = f_ovf;
13190 DECL_CHAIN (f_ovf) = f_sav;
13192 layout_type (record);
13194 /* The correct type is an array type of one element. */
13195 return build_array_type (record, build_index_type (size_zero_node));
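/* For illustration, the record built above corresponds roughly to this
   C declaration (the SVR4/V.4 va_list; field names match the
   FIELD_DECLs created above):

     typedef struct __va_list_tag {
       unsigned char gpr;          // index of next saved GPR (0..8)
       unsigned char fpr;          // index of next saved FPR (0..8)
       unsigned short reserved;    // the named padding
       void *overflow_arg_area;    // arguments passed on the stack
       void *reg_save_area;        // block spilled by the prologue
     } va_list[1];
 */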
13198 /* Implement va_start. */
13200 static void
13201 rs6000_va_start (tree valist, rtx nextarg)
13203 HOST_WIDE_INT words, n_gpr, n_fpr;
13204 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13205 tree gpr, fpr, ovf, sav, t;
13207 /* Only SVR4 needs something special. */
13208 if (DEFAULT_ABI != ABI_V4)
13210 std_expand_builtin_va_start (valist, nextarg);
13211 return;
13214 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13215 f_fpr = DECL_CHAIN (f_gpr);
13216 f_res = DECL_CHAIN (f_fpr);
13217 f_ovf = DECL_CHAIN (f_res);
13218 f_sav = DECL_CHAIN (f_ovf);
13220 valist = build_simple_mem_ref (valist);
13221 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13222 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13223 f_fpr, NULL_TREE);
13224 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13225 f_ovf, NULL_TREE);
13226 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13227 f_sav, NULL_TREE);
13229 /* Count number of gp and fp argument registers used. */
13230 words = crtl->args.info.words;
13231 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
13232 GP_ARG_NUM_REG);
13233 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
13234 FP_ARG_NUM_REG);
13236 if (TARGET_DEBUG_ARG)
13237 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
13238 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
13239 words, n_gpr, n_fpr);
13241 if (cfun->va_list_gpr_size)
13243 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13244 build_int_cst (NULL_TREE, n_gpr));
13245 TREE_SIDE_EFFECTS (t) = 1;
13246 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13249 if (cfun->va_list_fpr_size)
13251 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13252 build_int_cst (NULL_TREE, n_fpr));
13253 TREE_SIDE_EFFECTS (t) = 1;
13254 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13256 #ifdef HAVE_AS_GNU_ATTRIBUTE
13257 if (call_ABI_of_interest (cfun->decl))
13258 rs6000_passes_float = true;
13259 #endif
13262 /* Find the overflow area. */
13263 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13264 if (words != 0)
13265 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13266 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13267 TREE_SIDE_EFFECTS (t) = 1;
13268 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13270 /* If there were no va_arg invocations, don't set up the register
13271 save area. */
13272 if (!cfun->va_list_gpr_size
13273 && !cfun->va_list_fpr_size
13274 && n_gpr < GP_ARG_NUM_REG
13275 && n_fpr < FP_ARG_V4_MAX_REG)
13276 return;
13278 /* Find the register save area. */
13279 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13280 if (cfun->machine->varargs_save_offset)
13281 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13282 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13283 TREE_SIDE_EFFECTS (t) = 1;
13284 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
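/* For illustration (hypothetical prototype): given

     void f (int a, double b, ...);

   the ABI_V4 expansion above stores gpr = 1 (r3 held A), fpr = 1
   (f1 held B), points overflow_arg_area at the incoming stack
   arguments, and points reg_save_area at the block spilled by
   setup_incoming_varargs (varargs_save_offset from
   virtual_stack_vars_rtx).  */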
13287 /* Implement va_arg. */
13289 static tree
13290 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13291 gimple_seq *post_p)
13293 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13294 tree gpr, fpr, ovf, sav, reg, t, u;
13295 int size, rsize, n_reg, sav_ofs, sav_scale;
13296 tree lab_false, lab_over, addr;
13297 int align;
13298 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13299 int regalign = 0;
13300 gimple *stmt;
13302 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13304 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13305 return build_va_arg_indirect_ref (t);
13308 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13309 earlier version of gcc, with the property that it always applied alignment
13310 adjustments to the va-args (even for zero-sized types). The cheapest way
13311 to deal with this is to replicate the effect of the part of
13312 std_gimplify_va_arg_expr that carries out the align adjust, for the
13313 relevant case.
13314 We don't need to check for pass-by-reference because of the test above.
13315 We can return a simplified answer, since we know there's no offset to add.
13317 if (((TARGET_MACHO
13318 && rs6000_darwin64_abi)
13319 || DEFAULT_ABI == ABI_ELFv2
13320 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13321 && integer_zerop (TYPE_SIZE (type)))
13323 unsigned HOST_WIDE_INT align, boundary;
13324 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13325 align = PARM_BOUNDARY / BITS_PER_UNIT;
13326 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13327 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13328 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13329 boundary /= BITS_PER_UNIT;
13330 if (boundary > align)
13332 tree t;
13333 /* This updates arg ptr by the amount that would be necessary
13334 to align the zero-sized (but not zero-alignment) item. */
13335 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13336 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13337 gimplify_and_add (t, pre_p);
13339 t = fold_convert (sizetype, valist_tmp);
13340 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13341 fold_convert (TREE_TYPE (valist),
13342 fold_build2 (BIT_AND_EXPR, sizetype, t,
13343 size_int (-boundary))));
13344 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13345 gimplify_and_add (t, pre_p);
13347 /* Since it is zero-sized there's no increment for the item itself. */
13348 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13349 return build_va_arg_indirect_ref (valist_tmp);
13352 if (DEFAULT_ABI != ABI_V4)
13354 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13356 tree elem_type = TREE_TYPE (type);
13357 machine_mode elem_mode = TYPE_MODE (elem_type);
13358 int elem_size = GET_MODE_SIZE (elem_mode);
13360 if (elem_size < UNITS_PER_WORD)
13362 tree real_part, imag_part;
13363 gimple_seq post = NULL;
13365 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13366 &post);
13367 /* Copy the value into a temporary, lest the formal temporary
13368 be reused out from under us. */
13369 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13370 gimple_seq_add_seq (pre_p, post);
13372 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13373 post_p);
13375 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13379 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13382 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13383 f_fpr = DECL_CHAIN (f_gpr);
13384 f_res = DECL_CHAIN (f_fpr);
13385 f_ovf = DECL_CHAIN (f_res);
13386 f_sav = DECL_CHAIN (f_ovf);
13388 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13389 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13390 f_fpr, NULL_TREE);
13391 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13392 f_ovf, NULL_TREE);
13393 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13394 f_sav, NULL_TREE);
13396 size = int_size_in_bytes (type);
13397 rsize = (size + 3) / 4;
13398 int pad = 4 * rsize - size;
13399 align = 1;
13401 machine_mode mode = TYPE_MODE (type);
13402 if (abi_v4_pass_in_fpr (mode))
13404 /* FP args go in FP registers, if present. */
13405 reg = fpr;
13406 n_reg = (size + 7) / 8;
13407 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
13408 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
13409 if (mode != SFmode && mode != SDmode)
13410 align = 8;
13412 else
13414 /* Otherwise into GP registers. */
13415 reg = gpr;
13416 n_reg = rsize;
13417 sav_ofs = 0;
13418 sav_scale = 4;
13419 if (n_reg == 2)
13420 align = 8;
13423 /* Pull the value out of the saved registers.... */
13425 lab_over = NULL;
13426 addr = create_tmp_var (ptr_type_node, "addr");
13428 /* AltiVec vectors never go in registers when -mabi=altivec. */
13429 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13430 align = 16;
13431 else
13433 lab_false = create_artificial_label (input_location);
13434 lab_over = create_artificial_label (input_location);
13436 /* Long long is aligned in the registers, as is any other 2-GPR
13437 item such as complex int, due to a historical mistake. */
13438 u = reg;
13439 if (n_reg == 2 && reg == gpr)
13441 regalign = 1;
13442 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13443 build_int_cst (TREE_TYPE (reg), n_reg - 1));
13444 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
13445 unshare_expr (reg), u);
13447 /* _Decimal128 is passed in even/odd fpr pairs; the stored
13448 reg number is 0 for f1, so we want to make it odd. */
13449 else if (reg == fpr && mode == TDmode)
13451 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13452 build_int_cst (TREE_TYPE (reg), 1));
13453 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
13456 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
13457 t = build2 (GE_EXPR, boolean_type_node, u, t);
13458 u = build1 (GOTO_EXPR, void_type_node, lab_false);
13459 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13460 gimplify_and_add (t, pre_p);
13462 t = sav;
13463 if (sav_ofs)
13464 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13466 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13467 build_int_cst (TREE_TYPE (reg), n_reg));
13468 u = fold_convert (sizetype, u);
13469 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13470 t = fold_build_pointer_plus (t, u);
13472 /* _Decimal32 varargs are located in the second word of the 64-bit
13473 FP register for 32-bit binaries. */
13474 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
13475 t = fold_build_pointer_plus_hwi (t, size);
13477 /* Args are passed right-aligned. */
13478 if (BYTES_BIG_ENDIAN)
13479 t = fold_build_pointer_plus_hwi (t, pad);
13481 gimplify_assign (addr, t, pre_p);
13483 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13485 stmt = gimple_build_label (lab_false);
13486 gimple_seq_add_stmt (pre_p, stmt);
13488 if ((n_reg == 2 && !regalign) || n_reg > 2)
13490 /* Ensure that we don't find any more args in regs.
13491 Alignment has been taken care of for special cases. */
13492 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13496 /* ... otherwise out of the overflow area. */
13498 /* Care for on-stack alignment if needed. */
13499 t = ovf;
13500 if (align != 1)
13502 t = fold_build_pointer_plus_hwi (t, align - 1);
13503 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13504 build_int_cst (TREE_TYPE (t), -align));
13507 /* Args are passed right-aligned. */
13508 if (BYTES_BIG_ENDIAN)
13509 t = fold_build_pointer_plus_hwi (t, pad);
13511 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13513 gimplify_assign (unshare_expr (addr), t, pre_p);
13515 t = fold_build_pointer_plus_hwi (t, size);
13516 gimplify_assign (unshare_expr (ovf), t, pre_p);
13518 if (lab_over)
13520 stmt = gimple_build_label (lab_over);
13521 gimple_seq_add_stmt (pre_p, stmt);
13524 if (STRICT_ALIGNMENT
13525 && (TYPE_ALIGN (type)
13526 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13528 /* The value (of type complex double, for example) may not be
13529 aligned in memory in the saved registers, so copy via a
13530 temporary. (This is the same code as used for SPARC.) */
13531 tree tmp = create_tmp_var (type, "va_arg_tmp");
13532 tree dest_addr = build_fold_addr_expr (tmp);
13534 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13535 3, dest_addr, addr, size_int (rsize * 4));
13537 gimplify_and_add (copy, pre_p);
13538 addr = dest_addr;
13541 addr = fold_convert (ptrtype, addr);
13542 return build_va_arg_indirect_ref (addr);
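/* A worked example of the ABI_V4 logic above (hypothetical call site):

     double d = va_arg (ap, double);

   takes reg = fpr, n_reg = 1, sav_scale = 8 and align = 8; the GE_EXPR
   test branches to lab_false once fpr >= 8 (all of f1..f8 consumed),
   after which the value is fetched from the 8-byte-aligned overflow
   area instead of the FP register save area.  */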
13545 /* Builtins. */
13547 static void
13548 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13550 tree t;
13551 unsigned classify = rs6000_builtin_info[(int)code].attr;
13552 const char *attr_string = "";
13554 gcc_assert (name != NULL);
13555 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13557 if (rs6000_builtin_decls[(int)code])
13558 fatal_error (input_location,
13559 "internal error: builtin function %qs already processed",
13560 name);
13562 rs6000_builtin_decls[(int)code] = t =
13563 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13565 /* Set any special attributes. */
13566 if ((classify & RS6000_BTC_CONST) != 0)
13568 /* const function, function only depends on the inputs. */
13569 TREE_READONLY (t) = 1;
13570 TREE_NOTHROW (t) = 1;
13571 attr_string = ", const";
13573 else if ((classify & RS6000_BTC_PURE) != 0)
13575 /* pure function, function can read global memory, but does not set any
13576 external state. */
13577 DECL_PURE_P (t) = 1;
13578 TREE_NOTHROW (t) = 1;
13579 attr_string = ", pure";
13581 else if ((classify & RS6000_BTC_FP) != 0)
13583 /* Function is a math function. If rounding math (-frounding-math) is
13584 in effect, then treat the function as not reading global memory, but
13585 it can have arbitrary side effects. If it is off, then assume the
13586 function is a const function. This mimics the ATTR_MATHFN_FPROUNDING
13587 attribute in builtin-attrs.def that is used for the math functions. */
13588 TREE_NOTHROW (t) = 1;
13589 if (flag_rounding_math)
13591 DECL_PURE_P (t) = 1;
13592 DECL_IS_NOVOPS (t) = 1;
13593 attr_string = ", fp, pure";
13595 else
13597 TREE_READONLY (t) = 1;
13598 attr_string = ", fp, const";
13601 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13602 gcc_unreachable ();
13604 if (TARGET_DEBUG_BUILTIN)
13605 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13606 (int)code, name, attr_string);
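/* Typical use (illustrative; the type node is one of the
   ..._ftype_... trees built during builtin initialization):

     def_builtin ("__builtin_altivec_vmaddfp",
                  v4sf_ftype_v4sf_v4sf_v4sf,
                  ALTIVEC_BUILTIN_VMADDFP);
 */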
13609 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13611 #undef RS6000_BUILTIN_0
13612 #undef RS6000_BUILTIN_1
13613 #undef RS6000_BUILTIN_2
13614 #undef RS6000_BUILTIN_3
13615 #undef RS6000_BUILTIN_A
13616 #undef RS6000_BUILTIN_D
13617 #undef RS6000_BUILTIN_H
13618 #undef RS6000_BUILTIN_P
13619 #undef RS6000_BUILTIN_Q
13620 #undef RS6000_BUILTIN_X
13622 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13623 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13624 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13625 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13626 { MASK, ICODE, NAME, ENUM },
13628 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13629 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13630 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13631 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13632 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13633 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13635 static const struct builtin_description bdesc_3arg[] =
13637 #include "rs6000-builtin.def"
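/* Illustration of the X-macro pattern used for this and the following
   tables: with the definitions above, a hypothetical line in
   rs6000-builtin.def such as

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP, "__builtin_altivec_vmaddfp",
                       RS6000_BTM_ALTIVEC, RS6000_BTC_FP, CODE_FOR_fmav4sf4)

   expands here to the initializer

     { RS6000_BTM_ALTIVEC, CODE_FOR_fmav4sf4,
       "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },

   while the other nine macro flavors expand to nothing, so each table
   picks out exactly one class of builtins from the same .def file.  */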
13640 /* DST operations: void foo (void *, const int, const char). */
13642 #undef RS6000_BUILTIN_0
13643 #undef RS6000_BUILTIN_1
13644 #undef RS6000_BUILTIN_2
13645 #undef RS6000_BUILTIN_3
13646 #undef RS6000_BUILTIN_A
13647 #undef RS6000_BUILTIN_D
13648 #undef RS6000_BUILTIN_H
13649 #undef RS6000_BUILTIN_P
13650 #undef RS6000_BUILTIN_Q
13651 #undef RS6000_BUILTIN_X
13653 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13654 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13655 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13656 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13657 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13658 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13659 { MASK, ICODE, NAME, ENUM },
13661 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13662 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13663 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13664 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13666 static const struct builtin_description bdesc_dst[] =
13668 #include "rs6000-builtin.def"
13671 /* Simple binary operations: VECc = foo (VECa, VECb). */
13673 #undef RS6000_BUILTIN_0
13674 #undef RS6000_BUILTIN_1
13675 #undef RS6000_BUILTIN_2
13676 #undef RS6000_BUILTIN_3
13677 #undef RS6000_BUILTIN_A
13678 #undef RS6000_BUILTIN_D
13679 #undef RS6000_BUILTIN_H
13680 #undef RS6000_BUILTIN_P
13681 #undef RS6000_BUILTIN_Q
13682 #undef RS6000_BUILTIN_X
13684 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13685 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13686 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13687 { MASK, ICODE, NAME, ENUM },
13689 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13690 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13691 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13692 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13693 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13694 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13695 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13697 static const struct builtin_description bdesc_2arg[] =
13699 #include "rs6000-builtin.def"
13702 #undef RS6000_BUILTIN_0
13703 #undef RS6000_BUILTIN_1
13704 #undef RS6000_BUILTIN_2
13705 #undef RS6000_BUILTIN_3
13706 #undef RS6000_BUILTIN_A
13707 #undef RS6000_BUILTIN_D
13708 #undef RS6000_BUILTIN_H
13709 #undef RS6000_BUILTIN_P
13710 #undef RS6000_BUILTIN_Q
13711 #undef RS6000_BUILTIN_X
13713 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13714 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13715 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13716 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13717 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13718 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13719 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13720 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13721 { MASK, ICODE, NAME, ENUM },
13723 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13724 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13726 /* AltiVec predicates. */
13728 static const struct builtin_description bdesc_altivec_preds[] =
13730 #include "rs6000-builtin.def"
13733 /* PAIRED predicates. */
13734 #undef RS6000_BUILTIN_0
13735 #undef RS6000_BUILTIN_1
13736 #undef RS6000_BUILTIN_2
13737 #undef RS6000_BUILTIN_3
13738 #undef RS6000_BUILTIN_A
13739 #undef RS6000_BUILTIN_D
13740 #undef RS6000_BUILTIN_H
13741 #undef RS6000_BUILTIN_P
13742 #undef RS6000_BUILTIN_Q
13743 #undef RS6000_BUILTIN_X
13745 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13746 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13747 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13748 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13749 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13750 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13751 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13752 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13753 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
13754 { MASK, ICODE, NAME, ENUM },
13756 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13758 static const struct builtin_description bdesc_paired_preds[] =
13760 #include "rs6000-builtin.def"
13763 /* ABS* operations. */
13765 #undef RS6000_BUILTIN_0
13766 #undef RS6000_BUILTIN_1
13767 #undef RS6000_BUILTIN_2
13768 #undef RS6000_BUILTIN_3
13769 #undef RS6000_BUILTIN_A
13770 #undef RS6000_BUILTIN_D
13771 #undef RS6000_BUILTIN_H
13772 #undef RS6000_BUILTIN_P
13773 #undef RS6000_BUILTIN_Q
13774 #undef RS6000_BUILTIN_X
13776 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13777 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13778 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13779 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13780 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13781 { MASK, ICODE, NAME, ENUM },
13783 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13784 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13785 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13786 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13787 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13789 static const struct builtin_description bdesc_abs[] =
13791 #include "rs6000-builtin.def"
13794 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13795 foo (VECa). */
13797 #undef RS6000_BUILTIN_0
13798 #undef RS6000_BUILTIN_1
13799 #undef RS6000_BUILTIN_2
13800 #undef RS6000_BUILTIN_3
13801 #undef RS6000_BUILTIN_A
13802 #undef RS6000_BUILTIN_D
13803 #undef RS6000_BUILTIN_H
13804 #undef RS6000_BUILTIN_P
13805 #undef RS6000_BUILTIN_Q
13806 #undef RS6000_BUILTIN_X
13808 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13809 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13810 { MASK, ICODE, NAME, ENUM },
13812 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13813 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13814 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13815 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13816 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13817 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13818 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13819 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13821 static const struct builtin_description bdesc_1arg[] =
13823 #include "rs6000-builtin.def"
13826 /* Simple no-argument operations: result = __builtin_darn_32 (). */
13828 #undef RS6000_BUILTIN_0
13829 #undef RS6000_BUILTIN_1
13830 #undef RS6000_BUILTIN_2
13831 #undef RS6000_BUILTIN_3
13832 #undef RS6000_BUILTIN_A
13833 #undef RS6000_BUILTIN_D
13834 #undef RS6000_BUILTIN_H
13835 #undef RS6000_BUILTIN_P
13836 #undef RS6000_BUILTIN_Q
13837 #undef RS6000_BUILTIN_X
13839 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13840 { MASK, ICODE, NAME, ENUM },
13842 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13843 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13844 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13845 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13846 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13847 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13848 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13849 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13850 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13852 static const struct builtin_description bdesc_0arg[] =
13854 #include "rs6000-builtin.def"
13857 /* HTM builtins. */
13858 #undef RS6000_BUILTIN_0
13859 #undef RS6000_BUILTIN_1
13860 #undef RS6000_BUILTIN_2
13861 #undef RS6000_BUILTIN_3
13862 #undef RS6000_BUILTIN_A
13863 #undef RS6000_BUILTIN_D
13864 #undef RS6000_BUILTIN_H
13865 #undef RS6000_BUILTIN_P
13866 #undef RS6000_BUILTIN_Q
13867 #undef RS6000_BUILTIN_X
13869 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13870 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13871 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13872 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13873 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13874 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13875 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13876 { MASK, ICODE, NAME, ENUM },
13878 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13879 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13880 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13882 static const struct builtin_description bdesc_htm[] =
13884 #include "rs6000-builtin.def"
13887 #undef RS6000_BUILTIN_0
13888 #undef RS6000_BUILTIN_1
13889 #undef RS6000_BUILTIN_2
13890 #undef RS6000_BUILTIN_3
13891 #undef RS6000_BUILTIN_A
13892 #undef RS6000_BUILTIN_D
13893 #undef RS6000_BUILTIN_H
13894 #undef RS6000_BUILTIN_P
13895 #undef RS6000_BUILTIN_Q
13897 /* Return true if a builtin function is overloaded. */
13898 bool
13899 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13901 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13904 const char *
13905 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13907 return rs6000_builtin_info[(int)fncode].name;
13910 /* Expand an expression EXP that calls a builtin without arguments. */
13911 static rtx
13912 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13914 rtx pat;
13915 machine_mode tmode = insn_data[icode].operand[0].mode;
13917 if (icode == CODE_FOR_nothing)
13918 /* Builtin not supported on this processor. */
13919 return 0;
13921 if (target == 0
13922 || GET_MODE (target) != tmode
13923 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13924 target = gen_reg_rtx (tmode);
13926 pat = GEN_FCN (icode) (target);
13927 if (! pat)
13928 return 0;
13929 emit_insn (pat);
13931 return target;
13935 static rtx
13936 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13938 rtx pat;
13939 tree arg0 = CALL_EXPR_ARG (exp, 0);
13940 tree arg1 = CALL_EXPR_ARG (exp, 1);
13941 rtx op0 = expand_normal (arg0);
13942 rtx op1 = expand_normal (arg1);
13943 machine_mode mode0 = insn_data[icode].operand[0].mode;
13944 machine_mode mode1 = insn_data[icode].operand[1].mode;
13946 if (icode == CODE_FOR_nothing)
13947 /* Builtin not supported on this processor. */
13948 return 0;
13950 /* If we got invalid arguments bail out before generating bad rtl. */
13951 if (arg0 == error_mark_node || arg1 == error_mark_node)
13952 return const0_rtx;
13954 if (GET_CODE (op0) != CONST_INT
13955 || INTVAL (op0) > 255
13956 || INTVAL (op0) < 0)
13958 error ("argument 1 must be an 8-bit field value");
13959 return const0_rtx;
13962 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13963 op0 = copy_to_mode_reg (mode0, op0);
13965 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13966 op1 = copy_to_mode_reg (mode1, op1);
13968 pat = GEN_FCN (icode) (op0, op1);
13969 if (! pat)
13970 return const0_rtx;
13971 emit_insn (pat);
13973 return NULL_RTX;
13976 static rtx
13977 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13979 rtx pat;
13980 tree arg0 = CALL_EXPR_ARG (exp, 0);
13981 rtx op0 = expand_normal (arg0);
13982 machine_mode tmode = insn_data[icode].operand[0].mode;
13983 machine_mode mode0 = insn_data[icode].operand[1].mode;
13985 if (icode == CODE_FOR_nothing)
13986 /* Builtin not supported on this processor. */
13987 return 0;
13989 /* If we got invalid arguments bail out before generating bad rtl. */
13990 if (arg0 == error_mark_node)
13991 return const0_rtx;
13993 if (icode == CODE_FOR_altivec_vspltisb
13994 || icode == CODE_FOR_altivec_vspltish
13995 || icode == CODE_FOR_altivec_vspltisw)
13997 /* Only allow 5-bit *signed* literals. */
13998 if (GET_CODE (op0) != CONST_INT
13999 || INTVAL (op0) > 15
14000 || INTVAL (op0) < -16)
14002 error ("argument 1 must be a 5-bit signed literal");
14003 return CONST0_RTX (tmode);
14007 if (target == 0
14008 || GET_MODE (target) != tmode
14009 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14010 target = gen_reg_rtx (tmode);
14012 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14013 op0 = copy_to_mode_reg (mode0, op0);
14015 pat = GEN_FCN (icode) (target, op0);
14016 if (! pat)
14017 return 0;
14018 emit_insn (pat);
14020 return target;
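/* Usage example for the 5-bit literal check above (illustrative):

     vector signed int x = __builtin_altivec_vspltisw (5);   // accepted
     vector signed int y = __builtin_altivec_vspltisw (99);  // rejected:
                           // "argument 1 must be a 5-bit signed literal"
 */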
14023 static rtx
14024 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
14026 rtx pat, scratch1, scratch2;
14027 tree arg0 = CALL_EXPR_ARG (exp, 0);
14028 rtx op0 = expand_normal (arg0);
14029 machine_mode tmode = insn_data[icode].operand[0].mode;
14030 machine_mode mode0 = insn_data[icode].operand[1].mode;
14032 /* If we have invalid arguments, bail out before generating bad rtl. */
14033 if (arg0 == error_mark_node)
14034 return const0_rtx;
14036 if (target == 0
14037 || GET_MODE (target) != tmode
14038 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14039 target = gen_reg_rtx (tmode);
14041 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14042 op0 = copy_to_mode_reg (mode0, op0);
14044 scratch1 = gen_reg_rtx (mode0);
14045 scratch2 = gen_reg_rtx (mode0);
14047 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
14048 if (! pat)
14049 return 0;
14050 emit_insn (pat);
14052 return target;
14055 static rtx
14056 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
14058 rtx pat;
14059 tree arg0 = CALL_EXPR_ARG (exp, 0);
14060 tree arg1 = CALL_EXPR_ARG (exp, 1);
14061 rtx op0 = expand_normal (arg0);
14062 rtx op1 = expand_normal (arg1);
14063 machine_mode tmode = insn_data[icode].operand[0].mode;
14064 machine_mode mode0 = insn_data[icode].operand[1].mode;
14065 machine_mode mode1 = insn_data[icode].operand[2].mode;
14067 if (icode == CODE_FOR_nothing)
14068 /* Builtin not supported on this processor. */
14069 return 0;
14071 /* If we got invalid arguments bail out before generating bad rtl. */
14072 if (arg0 == error_mark_node || arg1 == error_mark_node)
14073 return const0_rtx;
14075 if (icode == CODE_FOR_altivec_vcfux
14076 || icode == CODE_FOR_altivec_vcfsx
14077 || icode == CODE_FOR_altivec_vctsxs
14078 || icode == CODE_FOR_altivec_vctuxs
14079 || icode == CODE_FOR_altivec_vspltb
14080 || icode == CODE_FOR_altivec_vsplth
14081 || icode == CODE_FOR_altivec_vspltw)
14083 /* Only allow 5-bit unsigned literals. */
14084 STRIP_NOPS (arg1);
14085 if (TREE_CODE (arg1) != INTEGER_CST
14086 || TREE_INT_CST_LOW (arg1) & ~0x1f)
14088 error ("argument 2 must be a 5-bit unsigned literal");
14089 return CONST0_RTX (tmode);
14092 else if (icode == CODE_FOR_dfptstsfi_eq_dd
14093 || icode == CODE_FOR_dfptstsfi_lt_dd
14094 || icode == CODE_FOR_dfptstsfi_gt_dd
14095 || icode == CODE_FOR_dfptstsfi_unordered_dd
14096 || icode == CODE_FOR_dfptstsfi_eq_td
14097 || icode == CODE_FOR_dfptstsfi_lt_td
14098 || icode == CODE_FOR_dfptstsfi_gt_td
14099 || icode == CODE_FOR_dfptstsfi_unordered_td)
14101 /* Only allow 6-bit unsigned literals. */
14102 STRIP_NOPS (arg0);
14103 if (TREE_CODE (arg0) != INTEGER_CST
14104 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
14106 error ("argument 1 must be a 6-bit unsigned literal");
14107 return CONST0_RTX (tmode);
14110 else if (icode == CODE_FOR_xststdcqp
14111 || icode == CODE_FOR_xststdcdp
14112 || icode == CODE_FOR_xststdcsp
14113 || icode == CODE_FOR_xvtstdcdp
14114 || icode == CODE_FOR_xvtstdcsp)
14116 /* Only allow 7-bit unsigned literals. */
14117 STRIP_NOPS (arg1);
14118 if (TREE_CODE (arg1) != INTEGER_CST
14119 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
14121 error ("argument 2 must be a 7-bit unsigned literal");
14122 return CONST0_RTX (tmode);
14125 else if (icode == CODE_FOR_unpackv1ti
14126 || icode == CODE_FOR_unpackkf
14127 || icode == CODE_FOR_unpacktf
14128 || icode == CODE_FOR_unpackif
14129 || icode == CODE_FOR_unpacktd)
14131 /* Only allow 1-bit unsigned literals. */
14132 STRIP_NOPS (arg1);
14133 if (TREE_CODE (arg1) != INTEGER_CST
14134 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
14136 error ("argument 2 must be a 1-bit unsigned literal");
14137 return CONST0_RTX (tmode);
14141 if (target == 0
14142 || GET_MODE (target) != tmode
14143 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14144 target = gen_reg_rtx (tmode);
14146 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14147 op0 = copy_to_mode_reg (mode0, op0);
14148 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14149 op1 = copy_to_mode_reg (mode1, op1);
14151 pat = GEN_FCN (icode) (target, op0, op1);
14152 if (! pat)
14153 return 0;
14154 emit_insn (pat);
14156 return target;
14159 static rtx
14160 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
14162 rtx pat, scratch;
14163 tree cr6_form = CALL_EXPR_ARG (exp, 0);
14164 tree arg0 = CALL_EXPR_ARG (exp, 1);
14165 tree arg1 = CALL_EXPR_ARG (exp, 2);
14166 rtx op0 = expand_normal (arg0);
14167 rtx op1 = expand_normal (arg1);
14168 machine_mode tmode = SImode;
14169 machine_mode mode0 = insn_data[icode].operand[1].mode;
14170 machine_mode mode1 = insn_data[icode].operand[2].mode;
14171 int cr6_form_int;
14173 if (TREE_CODE (cr6_form) != INTEGER_CST)
14175 error ("argument 1 of %qs must be a constant",
14176 "__builtin_altivec_predicate");
14177 return const0_rtx;
14179 else
14180 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
14182 gcc_assert (mode0 == mode1);
14184 /* If we have invalid arguments, bail out before generating bad rtl. */
14185 if (arg0 == error_mark_node || arg1 == error_mark_node)
14186 return const0_rtx;
14188 if (target == 0
14189 || GET_MODE (target) != tmode
14190 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14191 target = gen_reg_rtx (tmode);
14193 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14194 op0 = copy_to_mode_reg (mode0, op0);
14195 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14196 op1 = copy_to_mode_reg (mode1, op1);
14198 /* Note that for many of the relevant operations (e.g. cmpne or
14199 cmpeq) with float or double operands, it makes more sense for the
14200 mode of the allocated scratch register to be a vector of
14201 integers. But the choice to copy the mode of operand 0 was made
14202 long ago and there are no plans to change it. */
14203 scratch = gen_reg_rtx (mode0);
14205 pat = GEN_FCN (icode) (scratch, op0, op1);
14206 if (! pat)
14207 return 0;
14208 emit_insn (pat);
14210 /* The vec_any* and vec_all* predicates use the same opcodes for two
14211 different operations, but the bits in CR6 will be different
14212 depending on what information we want. So we have to play tricks
14213 with CR6 to get the right bits out.
14215 If you think this is disgusting, look at the specs for the
14216 AltiVec predicates. */
14218 switch (cr6_form_int)
14220 case 0:
14221 emit_insn (gen_cr6_test_for_zero (target));
14222 break;
14223 case 1:
14224 emit_insn (gen_cr6_test_for_zero_reverse (target));
14225 break;
14226 case 2:
14227 emit_insn (gen_cr6_test_for_lt (target));
14228 break;
14229 case 3:
14230 emit_insn (gen_cr6_test_for_lt_reverse (target));
14231 break;
14232 default:
14233 error ("argument 1 of %qs is out of range",
14234 "__builtin_altivec_predicate");
14235 break;
14238 return target;
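/* For illustration of the CR6 forms handled above: altivec.h passes
   the form as the first argument, roughly

     vec_all_eq (a, b)  ->  __builtin_vec_vcmpeq_p (__CR6_LT, a, b)
     vec_any_eq (a, b)  ->  __builtin_vec_vcmpeq_p (__CR6_EQ_REV, a, b)

   where __CR6_EQ == 0, __CR6_EQ_REV == 1, __CR6_LT == 2 and
   __CR6_LT_REV == 3 select the four gen_cr6_test_* cases.  */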
14241 static rtx
14242 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
14244 rtx pat, addr;
14245 tree arg0 = CALL_EXPR_ARG (exp, 0);
14246 tree arg1 = CALL_EXPR_ARG (exp, 1);
14247 machine_mode tmode = insn_data[icode].operand[0].mode;
14248 machine_mode mode0 = Pmode;
14249 machine_mode mode1 = Pmode;
14250 rtx op0 = expand_normal (arg0);
14251 rtx op1 = expand_normal (arg1);
14253 if (icode == CODE_FOR_nothing)
14254 /* Builtin not supported on this processor. */
14255 return 0;
14257 /* If we got invalid arguments bail out before generating bad rtl. */
14258 if (arg0 == error_mark_node || arg1 == error_mark_node)
14259 return const0_rtx;
14261 if (target == 0
14262 || GET_MODE (target) != tmode
14263 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14264 target = gen_reg_rtx (tmode);
14266 op1 = copy_to_mode_reg (mode1, op1);
14268 if (op0 == const0_rtx)
14270 addr = gen_rtx_MEM (tmode, op1);
14272 else
14274 op0 = copy_to_mode_reg (mode0, op0);
14275 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
14278 pat = GEN_FCN (icode) (target, addr);
14280 if (! pat)
14281 return 0;
14282 emit_insn (pat);
14284 return target;
14287 /* Return a constant vector for use as a little-endian permute control vector
14288 to reverse the order of elements of the given vector mode. */
14289 static rtx
14290 swap_selector_for_mode (machine_mode mode)
14292 /* These are little endian vectors, so their elements are reversed
14293 from what you would normally expect for a permute control vector. */
14294 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14295 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14296 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14297 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
14298 unsigned int *swaparray, i;
14299 rtx perm[16];
14301 switch (mode)
14303 case E_V2DFmode:
14304 case E_V2DImode:
14305 swaparray = swap2;
14306 break;
14307 case E_V4SFmode:
14308 case E_V4SImode:
14309 swaparray = swap4;
14310 break;
14311 case E_V8HImode:
14312 swaparray = swap8;
14313 break;
14314 case E_V16QImode:
14315 swaparray = swap16;
14316 break;
14317 default:
14318 gcc_unreachable ();
14321 for (i = 0; i < 16; ++i)
14322 perm[i] = GEN_INT (swaparray[i]);
14324 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
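/* For example, for V4SImode the selector built above is
   {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}; a vperm of a vector
   with itself under this selector reverses the order of its four
   words.  */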
14327 /* Generate code for an "lvxl" or "lve*x" built-in for a little endian target
14328 with -maltivec=be specified. Issue the load followed by an element-
14329 reversing permute. */
14330 void
14331 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14333 rtx tmp = gen_reg_rtx (mode);
14334 rtx load = gen_rtx_SET (tmp, op1);
14335 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14336 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
14337 rtx sel = swap_selector_for_mode (mode);
14338 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
14340 gcc_assert (REG_P (op0));
14341 emit_insn (par);
14342 emit_insn (gen_rtx_SET (op0, vperm));
14345 /* Generate code for a "stvxl" built-in for a little endian target with
14346 -maltivec=be specified. Issue the store preceded by an element-reversing
14347 permute. */
14348 void
14349 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14351 rtx tmp = gen_reg_rtx (mode);
14352 rtx store = gen_rtx_SET (op0, tmp);
14353 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14354 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
14355 rtx sel = swap_selector_for_mode (mode);
14356 rtx vperm;
14358 gcc_assert (REG_P (op1));
14359 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14360 emit_insn (gen_rtx_SET (tmp, vperm));
14361 emit_insn (par);
14364 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
14365 specified. Issue the store preceded by an element-reversing permute. */
14366 void
14367 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14369 machine_mode inner_mode = GET_MODE_INNER (mode);
14370 rtx tmp = gen_reg_rtx (mode);
14371 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
14372 rtx sel = swap_selector_for_mode (mode);
14373 rtx vperm;
14375 gcc_assert (REG_P (op1));
14376 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14377 emit_insn (gen_rtx_SET (tmp, vperm));
14378 emit_insn (gen_rtx_SET (op0, stvx));
14381 static rtx
14382 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14384 rtx pat, addr;
14385 tree arg0 = CALL_EXPR_ARG (exp, 0);
14386 tree arg1 = CALL_EXPR_ARG (exp, 1);
14387 machine_mode tmode = insn_data[icode].operand[0].mode;
14388 machine_mode mode0 = Pmode;
14389 machine_mode mode1 = Pmode;
14390 rtx op0 = expand_normal (arg0);
14391 rtx op1 = expand_normal (arg1);
14393 if (icode == CODE_FOR_nothing)
14394 /* Builtin not supported on this processor. */
14395 return 0;
14397 /* If we got invalid arguments bail out before generating bad rtl. */
14398 if (arg0 == error_mark_node || arg1 == error_mark_node)
14399 return const0_rtx;
14401 if (target == 0
14402 || GET_MODE (target) != tmode
14403 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14404 target = gen_reg_rtx (tmode);
14406 op1 = copy_to_mode_reg (mode1, op1);
14408 /* For LVX, express the RTL accurately by ANDing the address with -16.
14409 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14410 so the raw address is fine. */
14411 if (icode == CODE_FOR_altivec_lvx_v2df_2op
14412 || icode == CODE_FOR_altivec_lvx_v2di_2op
14413 || icode == CODE_FOR_altivec_lvx_v4sf_2op
14414 || icode == CODE_FOR_altivec_lvx_v4si_2op
14415 || icode == CODE_FOR_altivec_lvx_v8hi_2op
14416 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
14418 rtx rawaddr;
14419 if (op0 == const0_rtx)
14420 rawaddr = op1;
14421 else
14423 op0 = copy_to_mode_reg (mode0, op0);
14424 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
14426 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14427 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
14429 /* For -maltivec=be, emit the load and follow it up with a
14430 permute to swap the elements. */
14431 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14433 rtx temp = gen_reg_rtx (tmode);
14434 emit_insn (gen_rtx_SET (temp, addr));
14436 rtx sel = swap_selector_for_mode (tmode);
14437 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
14438 UNSPEC_VPERM);
14439 emit_insn (gen_rtx_SET (target, vperm));
14441 else
14442 emit_insn (gen_rtx_SET (target, addr));
14444 else
14446 if (op0 == const0_rtx)
14447 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14448 else
14450 op0 = copy_to_mode_reg (mode0, op0);
14451 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14452 gen_rtx_PLUS (Pmode, op1, op0));
14455 pat = GEN_FCN (icode) (target, addr);
14456 if (! pat)
14457 return 0;
14458 emit_insn (pat);
14461 return target;
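/* Design note: the AND with -16 models what the hardware does (lvx
   ignores the low four bits of the effective address), so the RTL
   describes the actual 16-byte-aligned access and the optimizers can
   reason about it safely.  */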
14464 static rtx
14465 altivec_expand_xl_be_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14467 rtx pat, addr;
14468 tree arg0 = CALL_EXPR_ARG (exp, 0);
14469 tree arg1 = CALL_EXPR_ARG (exp, 1);
14470 machine_mode tmode = insn_data[icode].operand[0].mode;
14471 machine_mode mode0 = Pmode;
14472 machine_mode mode1 = Pmode;
14473 rtx op0 = expand_normal (arg0);
14474 rtx op1 = expand_normal (arg1);
14476 if (icode == CODE_FOR_nothing)
14477 /* Builtin not supported on this processor. */
14478 return 0;
14480 /* If we got invalid arguments bail out before generating bad rtl. */
14481 if (arg0 == error_mark_node || arg1 == error_mark_node)
14482 return const0_rtx;
14484 if (target == 0
14485 || GET_MODE (target) != tmode
14486 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14487 target = gen_reg_rtx (tmode);
14489 op1 = copy_to_mode_reg (mode1, op1);
14491 if (op0 == const0_rtx)
14492 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14493 else
14495 op0 = copy_to_mode_reg (mode0, op0);
14496 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14497 gen_rtx_PLUS (Pmode, op1, op0));
14500 pat = GEN_FCN (icode) (target, addr);
14501 if (!pat)
14502 return 0;
14504 emit_insn (pat);
14505 /* Reverse the order of the elements if in LE mode. */
14506 if (!VECTOR_ELT_ORDER_BIG)
14508 rtx sel = swap_selector_for_mode (tmode);
14509 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, target, target, sel),
14510 UNSPEC_VPERM);
14511 emit_insn (gen_rtx_SET (target, vperm));
14513 return target;
14516 static rtx
14517 paired_expand_stv_builtin (enum insn_code icode, tree exp)
14519 tree arg0 = CALL_EXPR_ARG (exp, 0);
14520 tree arg1 = CALL_EXPR_ARG (exp, 1);
14521 tree arg2 = CALL_EXPR_ARG (exp, 2);
14522 rtx op0 = expand_normal (arg0);
14523 rtx op1 = expand_normal (arg1);
14524 rtx op2 = expand_normal (arg2);
14525 rtx pat, addr;
14526 machine_mode tmode = insn_data[icode].operand[0].mode;
14527 machine_mode mode1 = Pmode;
14528 machine_mode mode2 = Pmode;
14530 /* Invalid arguments; bail out before generating bad rtl.  */
14531 if (arg0 == error_mark_node
14532 || arg1 == error_mark_node
14533 || arg2 == error_mark_node)
14534 return const0_rtx;
14536 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
14537 op0 = copy_to_mode_reg (tmode, op0);
14539 op2 = copy_to_mode_reg (mode2, op2);
14541 if (op1 == const0_rtx)
14543 addr = gen_rtx_MEM (tmode, op2);
14545 else
14547 op1 = copy_to_mode_reg (mode1, op1);
14548 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
14551 pat = GEN_FCN (icode) (addr, op0);
14552 if (pat)
14553 emit_insn (pat);
14554 return NULL_RTX;
14557 static rtx
14558 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
14560 rtx pat;
14561 tree arg0 = CALL_EXPR_ARG (exp, 0);
14562 tree arg1 = CALL_EXPR_ARG (exp, 1);
14563 tree arg2 = CALL_EXPR_ARG (exp, 2);
14564 rtx op0 = expand_normal (arg0);
14565 rtx op1 = expand_normal (arg1);
14566 rtx op2 = expand_normal (arg2);
14567 machine_mode mode0 = insn_data[icode].operand[0].mode;
14568 machine_mode mode1 = insn_data[icode].operand[1].mode;
14569 machine_mode mode2 = insn_data[icode].operand[2].mode;
14571 if (icode == CODE_FOR_nothing)
14572 /* Builtin not supported on this processor. */
14573 return NULL_RTX;
14575 /* If we got invalid arguments, bail out before generating bad rtl.  */
14576 if (arg0 == error_mark_node
14577 || arg1 == error_mark_node
14578 || arg2 == error_mark_node)
14579 return NULL_RTX;
14581 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14582 op0 = copy_to_mode_reg (mode0, op0);
14583 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14584 op1 = copy_to_mode_reg (mode1, op1);
14585 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14586 op2 = copy_to_mode_reg (mode2, op2);
14588 pat = GEN_FCN (icode) (op0, op1, op2);
14589 if (pat)
14590 emit_insn (pat);
14592 return NULL_RTX;
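/* Editorial sketch (not part of the original file): the stxvl expansion
   above backs the ISA 3.0 length-controlled store intrinsic.  Hedged
   example; the function name is invented and assumes -mcpu=power9.  */
#if 0
#include <altivec.h>
#include <stddef.h>

void
example_partial_store (vector unsigned char v, unsigned char *p, size_t len)
{
  /* Stores only the first LEN bytes of V to P (at most 16).  */
  vec_xst_len (v, p, len);
}
#endif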
14595 static rtx
14596 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14598 tree arg0 = CALL_EXPR_ARG (exp, 0);
14599 tree arg1 = CALL_EXPR_ARG (exp, 1);
14600 tree arg2 = CALL_EXPR_ARG (exp, 2);
14601 rtx op0 = expand_normal (arg0);
14602 rtx op1 = expand_normal (arg1);
14603 rtx op2 = expand_normal (arg2);
14604 rtx pat, addr, rawaddr;
14605 machine_mode tmode = insn_data[icode].operand[0].mode;
14606 machine_mode smode = insn_data[icode].operand[1].mode;
14607 machine_mode mode1 = Pmode;
14608 machine_mode mode2 = Pmode;
14610 /* Invalid arguments; bail out before generating bad rtl.  */
14611 if (arg0 == error_mark_node
14612 || arg1 == error_mark_node
14613 || arg2 == error_mark_node)
14614 return const0_rtx;
14616 op2 = copy_to_mode_reg (mode2, op2);
14618 /* For STVX, express the RTL accurately by ANDing the address with -16.
14619 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14620 so the raw address is fine. */
14621 if (icode == CODE_FOR_altivec_stvx_v2df_2op
14622 || icode == CODE_FOR_altivec_stvx_v2di_2op
14623 || icode == CODE_FOR_altivec_stvx_v4sf_2op
14624 || icode == CODE_FOR_altivec_stvx_v4si_2op
14625 || icode == CODE_FOR_altivec_stvx_v8hi_2op
14626 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
14628 if (op1 == const0_rtx)
14629 rawaddr = op2;
14630 else
14632 op1 = copy_to_mode_reg (mode1, op1);
14633 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14636 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14637 addr = gen_rtx_MEM (tmode, addr);
14639 op0 = copy_to_mode_reg (tmode, op0);
14641 /* For -maltivec=be, emit a permute to swap the elements, followed
14642 by the store. */
14643 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14645 rtx temp = gen_reg_rtx (tmode);
14646 rtx sel = swap_selector_for_mode (tmode);
14647 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
14648 UNSPEC_VPERM);
14649 emit_insn (gen_rtx_SET (temp, vperm));
14650 emit_insn (gen_rtx_SET (addr, temp));
14652 else
14653 emit_insn (gen_rtx_SET (addr, op0));
14655 else
14657 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14658 op0 = copy_to_mode_reg (smode, op0);
14660 if (op1 == const0_rtx)
14661 addr = gen_rtx_MEM (tmode, op2);
14662 else
14664 op1 = copy_to_mode_reg (mode1, op1);
14665 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14668 pat = GEN_FCN (icode) (addr, op0);
14669 if (pat)
14670 emit_insn (pat);
14673 return NULL_RTX;
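/* Editorial sketch (not part of the original file): the store-side
   counterpart of the lvx example earlier; vec_st likewise ignores the
   low four address bits, which the AND with -16 above makes explicit in
   the RTL.  Function name invented; assumes -maltivec.  */
#if 0
#include <altivec.h>

void
example_stvx_store (vector signed int v, signed int *p)
{
  /* Stores V to the 16-byte block containing P, i.e. to (p & ~15).  */
  vec_st (v, 0, p);
}
#endif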
14676 /* Return the appropriate SPR number associated with the given builtin. */
14677 static inline HOST_WIDE_INT
14678 htm_spr_num (enum rs6000_builtins code)
14680 if (code == HTM_BUILTIN_GET_TFHAR
14681 || code == HTM_BUILTIN_SET_TFHAR)
14682 return TFHAR_SPR;
14683 else if (code == HTM_BUILTIN_GET_TFIAR
14684 || code == HTM_BUILTIN_SET_TFIAR)
14685 return TFIAR_SPR;
14686 else if (code == HTM_BUILTIN_GET_TEXASR
14687 || code == HTM_BUILTIN_SET_TEXASR)
14688 return TEXASR_SPR;
14689 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14690 || code == HTM_BUILTIN_SET_TEXASRU);
14691 return TEXASRU_SPR;
14694 /* Return the appropriate SPR regno associated with the given builtin. */
14695 static inline HOST_WIDE_INT
14696 htm_spr_regno (enum rs6000_builtins code)
14698 if (code == HTM_BUILTIN_GET_TFHAR
14699 || code == HTM_BUILTIN_SET_TFHAR)
14700 return TFHAR_REGNO;
14701 else if (code == HTM_BUILTIN_GET_TFIAR
14702 || code == HTM_BUILTIN_SET_TFIAR)
14703 return TFIAR_REGNO;
14704 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14705 || code == HTM_BUILTIN_SET_TEXASR
14706 || code == HTM_BUILTIN_GET_TEXASRU
14707 || code == HTM_BUILTIN_SET_TEXASRU);
14708 return TEXASR_REGNO;
14711 /* Return the correct ICODE value depending on whether we are
14712 setting or reading the HTM SPRs. */
14713 static inline enum insn_code
14714 rs6000_htm_spr_icode (bool nonvoid)
14716 if (nonvoid)
14717 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14718 else
14719 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
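/* Editorial sketch (not part of the original file): the three helpers
   above serve the user-level HTM SPR accessors, each of which expands to
   an mfspr/mtspr of the SPR number and regno returned here.  Hedged
   example; the function name is invented and assumes -mhtm.  */
#if 0
unsigned long
example_htm_failure_info (void)
{
  /* TEXASR holds the transaction failure status, TFIAR the address of
     the failing instruction.  */
  unsigned long texasr = __builtin_get_texasr ();
  unsigned long tfiar = __builtin_get_tfiar ();
  return texasr ^ tfiar;
}
#endif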
14722 /* Expand the HTM builtin in EXP and store the result in TARGET.
14723 Store true in *EXPANDEDP if we found a builtin to expand. */
14724 static rtx
14725 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14727 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14728 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14729 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14730 const struct builtin_description *d;
14731 size_t i;
14733 *expandedp = true;
14735 if (!TARGET_POWERPC64
14736 && (fcode == HTM_BUILTIN_TABORTDC
14737 || fcode == HTM_BUILTIN_TABORTDCI))
14739 size_t uns_fcode = (size_t)fcode;
14740 const char *name = rs6000_builtin_info[uns_fcode].name;
14741 error ("builtin %qs is only valid in 64-bit mode", name);
14742 return const0_rtx;
14745 /* Expand the HTM builtins. */
14746 d = bdesc_htm;
14747 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14748 if (d->code == fcode)
14750 rtx op[MAX_HTM_OPERANDS], pat;
14751 int nopnds = 0;
14752 tree arg;
14753 call_expr_arg_iterator iter;
14754 unsigned attr = rs6000_builtin_info[fcode].attr;
14755 enum insn_code icode = d->icode;
14756 const struct insn_operand_data *insn_op;
14757 bool uses_spr = (attr & RS6000_BTC_SPR);
14758 rtx cr = NULL_RTX;
14760 if (uses_spr)
14761 icode = rs6000_htm_spr_icode (nonvoid);
14762 insn_op = &insn_data[icode].operand[0];
14764 if (nonvoid)
14766 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14767 if (!target
14768 || GET_MODE (target) != tmode
14769 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14770 target = gen_reg_rtx (tmode);
14771 if (uses_spr)
14772 op[nopnds++] = target;
14775 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14777 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14778 return const0_rtx;
14780 insn_op = &insn_data[icode].operand[nopnds];
14782 op[nopnds] = expand_normal (arg);
14784 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14786 if (!strcmp (insn_op->constraint, "n"))
14788 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14789 if (!CONST_INT_P (op[nopnds]))
14790 error ("argument %d must be an unsigned literal", arg_num);
14791 else
14792 error ("argument %d is an unsigned literal that is "
14793 "out of range", arg_num);
14794 return const0_rtx;
14796 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14799 nopnds++;
14802 /* Handle the builtins for extended mnemonics. These accept
14803 no arguments, but map to builtins that take arguments. */
14804 switch (fcode)
14806 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14807 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14808 op[nopnds++] = GEN_INT (1);
14809 if (flag_checking)
14810 attr |= RS6000_BTC_UNARY;
14811 break;
14812 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14813 op[nopnds++] = GEN_INT (0);
14814 if (flag_checking)
14815 attr |= RS6000_BTC_UNARY;
14816 break;
14817 default:
14818 break;
14821 /* If this builtin accesses SPRs, then pass in the appropriate
14822 SPR number and SPR regno as the last two operands. */
14823 if (uses_spr)
14825 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14826 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14827 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14829 /* If this builtin accesses a CR, then pass in a scratch
14830 CR as the last operand. */
14831 else if (attr & RS6000_BTC_CR)
14832 { cr = gen_reg_rtx (CCmode);
14833 op[nopnds++] = cr;
14836 if (flag_checking)
14838 int expected_nopnds = 0;
14839 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14840 expected_nopnds = 1;
14841 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14842 expected_nopnds = 2;
14843 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14844 expected_nopnds = 3;
14845 if (!(attr & RS6000_BTC_VOID))
14846 expected_nopnds += 1;
14847 if (uses_spr)
14848 expected_nopnds += 2;
14850 gcc_assert (nopnds == expected_nopnds
14851 && nopnds <= MAX_HTM_OPERANDS);
14854 switch (nopnds)
14856 case 1:
14857 pat = GEN_FCN (icode) (op[0]);
14858 break;
14859 case 2:
14860 pat = GEN_FCN (icode) (op[0], op[1]);
14861 break;
14862 case 3:
14863 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14864 break;
14865 case 4:
14866 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14867 break;
14868 default:
14869 gcc_unreachable ();
14871 if (!pat)
14872 return NULL_RTX;
14873 emit_insn (pat);
14875 if (attr & RS6000_BTC_CR)
14877 if (fcode == HTM_BUILTIN_TBEGIN)
14879 /* Emit code to set TARGET to true or false depending on
14880 whether the tbegin. instruction succeeded or failed
14881 to start a transaction.  We do this by placing the 1's
14882 complement of CR's EQ bit into TARGET.  */
14883 rtx scratch = gen_reg_rtx (SImode);
14884 emit_insn (gen_rtx_SET (scratch,
14885 gen_rtx_EQ (SImode, cr,
14886 const0_rtx)));
14887 emit_insn (gen_rtx_SET (target,
14888 gen_rtx_XOR (SImode, scratch,
14889 GEN_INT (1))));
14891 else
14893 /* Emit code to copy the 4-bit condition register field
14894 CR into the least significant end of register TARGET. */
14895 rtx scratch1 = gen_reg_rtx (SImode);
14896 rtx scratch2 = gen_reg_rtx (SImode);
14897 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14898 emit_insn (gen_movcc (subreg, cr));
14899 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14900 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14904 if (nonvoid)
14905 return target;
14906 return const0_rtx;
14909 *expandedp = false;
14910 return NULL_RTX;
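/* Editorial sketch (not part of the original file): the CR handling
   above gives __builtin_tbegin its "nonzero if the transaction started"
   result by placing the complement of CR0's EQ bit in the target.
   Hedged example; the function name is invented and assumes -mhtm.  */
#if 0
int
example_transactional_incr (int *counter)
{
  if (__builtin_tbegin (0))
    {
      /* Transactional path.  */
      ++*counter;
      __builtin_tend (0);
      return 1;
    }
  /* Fallback: the transaction failed to start or was aborted.  */
  return 0;
}
#endif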
14913 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14915 static rtx
14916 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14917 rtx target)
14919 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14920 if (fcode == RS6000_BUILTIN_CPU_INIT)
14921 return const0_rtx;
14923 if (target == 0 || GET_MODE (target) != SImode)
14924 target = gen_reg_rtx (SImode);
14926 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14927 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14928 /* Target clones creates an ARRAY_REF instead of a STRING_CST;
14929 convert it back to a STRING_CST.  */
14930 if (TREE_CODE (arg) == ARRAY_REF
14931 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14932 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14933 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14934 arg = TREE_OPERAND (arg, 0);
14936 if (TREE_CODE (arg) != STRING_CST)
14938 error ("builtin %qs only accepts a string argument",
14939 rs6000_builtin_info[(size_t) fcode].name);
14940 return const0_rtx;
14943 if (fcode == RS6000_BUILTIN_CPU_IS)
14945 const char *cpu = TREE_STRING_POINTER (arg);
14946 rtx cpuid = NULL_RTX;
14947 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14948 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14950 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14951 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14952 break;
14954 if (cpuid == NULL_RTX)
14956 /* Invalid CPU argument. */
14957 error ("cpu %qs is an invalid argument to builtin %qs",
14958 cpu, rs6000_builtin_info[(size_t) fcode].name);
14959 return const0_rtx;
14962 rtx platform = gen_reg_rtx (SImode);
14963 rtx tcbmem = gen_const_mem (SImode,
14964 gen_rtx_PLUS (Pmode,
14965 gen_rtx_REG (Pmode, TLS_REGNUM),
14966 GEN_INT (TCB_PLATFORM_OFFSET)));
14967 emit_move_insn (platform, tcbmem);
14968 emit_insn (gen_eqsi3 (target, platform, cpuid));
14970 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14972 const char *hwcap = TREE_STRING_POINTER (arg);
14973 rtx mask = NULL_RTX;
14974 int hwcap_offset;
14975 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14976 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14978 mask = GEN_INT (cpu_supports_info[i].mask);
14979 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14980 break;
14982 if (mask == NULL_RTX)
14984 /* Invalid HWCAP argument. */
14985 error ("%s %qs is an invalid argument to builtin %qs",
14986 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14987 return const0_rtx;
14990 rtx tcb_hwcap = gen_reg_rtx (SImode);
14991 rtx tcbmem = gen_const_mem (SImode,
14992 gen_rtx_PLUS (Pmode,
14993 gen_rtx_REG (Pmode, TLS_REGNUM),
14994 GEN_INT (hwcap_offset)));
14995 emit_move_insn (tcb_hwcap, tcbmem);
14996 rtx scratch1 = gen_reg_rtx (SImode);
14997 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14998 rtx scratch2 = gen_reg_rtx (SImode);
14999 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
15000 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
15002 else
15003 gcc_unreachable ();
15005 /* Record that we have expanded a CPU builtin, so that we can later
15006 emit a reference to the special symbol exported by LIBC to ensure we
15007 do not link against an old LIBC that doesn't support this feature. */
15008 cpu_builtin_p = true;
15010 #else
15011 warning (0, "builtin %qs needs GLIBC (2.23 or newer) that exports hardware "
15012 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
15014 /* For old LIBCs, always return FALSE. */
15015 emit_move_insn (target, GEN_INT (0));
15016 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
15018 return target;
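/* Editorial sketch (not part of the original file): illustrative use of
   the two CPU builtins expanded above.  The CPU and feature names must
   appear in cpu_is_info[] / cpu_supports_info[], and glibc 2.23 or newer
   is required, as the warning above notes.  Function name invented.  */
#if 0
#include <stdio.h>

void
example_cpu_dispatch (void)
{
  __builtin_cpu_init ();	/* A nop on powerpc, as noted above.  */
  if (__builtin_cpu_is ("power9"))
    puts ("running on a POWER9");
  if (__builtin_cpu_supports ("vsx"))
    puts ("VSX is available");
}
#endif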
15021 static rtx
15022 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
15024 rtx pat;
15025 tree arg0 = CALL_EXPR_ARG (exp, 0);
15026 tree arg1 = CALL_EXPR_ARG (exp, 1);
15027 tree arg2 = CALL_EXPR_ARG (exp, 2);
15028 rtx op0 = expand_normal (arg0);
15029 rtx op1 = expand_normal (arg1);
15030 rtx op2 = expand_normal (arg2);
15031 machine_mode tmode = insn_data[icode].operand[0].mode;
15032 machine_mode mode0 = insn_data[icode].operand[1].mode;
15033 machine_mode mode1 = insn_data[icode].operand[2].mode;
15034 machine_mode mode2 = insn_data[icode].operand[3].mode;
15036 if (icode == CODE_FOR_nothing)
15037 /* Builtin not supported on this processor. */
15038 return 0;
15040 /* If we got invalid arguments, bail out before generating bad rtl.  */
15041 if (arg0 == error_mark_node
15042 || arg1 == error_mark_node
15043 || arg2 == error_mark_node)
15044 return const0_rtx;
15046 /* Check and prepare the argument depending on the instruction code.
15048 Note that a switch statement instead of the sequence of tests
15049 would be incorrect, as many of the CODE_FOR values could be
15050 CODE_FOR_nothing, and that would yield multiple case alternatives
15051 with identical values.  (We would never reach here at runtime in
15052 that case.)  */
15053 if (icode == CODE_FOR_altivec_vsldoi_v4sf
15054 || icode == CODE_FOR_altivec_vsldoi_v2df
15055 || icode == CODE_FOR_altivec_vsldoi_v4si
15056 || icode == CODE_FOR_altivec_vsldoi_v8hi
15057 || icode == CODE_FOR_altivec_vsldoi_v16qi)
15059 /* Only allow 4-bit unsigned literals. */
15060 STRIP_NOPS (arg2);
15061 if (TREE_CODE (arg2) != INTEGER_CST
15062 || TREE_INT_CST_LOW (arg2) & ~0xf)
15064 error ("argument 3 must be a 4-bit unsigned literal");
15065 return CONST0_RTX (tmode);
15068 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
15069 || icode == CODE_FOR_vsx_xxpermdi_v2di
15070 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
15071 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
15072 || icode == CODE_FOR_vsx_xxpermdi_v1ti
15073 || icode == CODE_FOR_vsx_xxpermdi_v4sf
15074 || icode == CODE_FOR_vsx_xxpermdi_v4si
15075 || icode == CODE_FOR_vsx_xxpermdi_v8hi
15076 || icode == CODE_FOR_vsx_xxpermdi_v16qi
15077 || icode == CODE_FOR_vsx_xxsldwi_v16qi
15078 || icode == CODE_FOR_vsx_xxsldwi_v8hi
15079 || icode == CODE_FOR_vsx_xxsldwi_v4si
15080 || icode == CODE_FOR_vsx_xxsldwi_v4sf
15081 || icode == CODE_FOR_vsx_xxsldwi_v2di
15082 || icode == CODE_FOR_vsx_xxsldwi_v2df)
15084 /* Only allow 2-bit unsigned literals. */
15085 STRIP_NOPS (arg2);
15086 if (TREE_CODE (arg2) != INTEGER_CST
15087 || TREE_INT_CST_LOW (arg2) & ~0x3)
15089 error ("argument 3 must be a 2-bit unsigned literal");
15090 return CONST0_RTX (tmode);
15093 else if (icode == CODE_FOR_vsx_set_v2df
15094 || icode == CODE_FOR_vsx_set_v2di
15095 || icode == CODE_FOR_bcdadd
15096 || icode == CODE_FOR_bcdadd_lt
15097 || icode == CODE_FOR_bcdadd_eq
15098 || icode == CODE_FOR_bcdadd_gt
15099 || icode == CODE_FOR_bcdsub
15100 || icode == CODE_FOR_bcdsub_lt
15101 || icode == CODE_FOR_bcdsub_eq
15102 || icode == CODE_FOR_bcdsub_gt)
15104 /* Only allow 1-bit unsigned literals. */
15105 STRIP_NOPS (arg2);
15106 if (TREE_CODE (arg2) != INTEGER_CST
15107 || TREE_INT_CST_LOW (arg2) & ~0x1)
15109 error ("argument 3 must be a 1-bit unsigned literal");
15110 return CONST0_RTX (tmode);
15113 else if (icode == CODE_FOR_dfp_ddedpd_dd
15114 || icode == CODE_FOR_dfp_ddedpd_td)
15116 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15117 STRIP_NOPS (arg0);
15118 if (TREE_CODE (arg0) != INTEGER_CST
15119 || TREE_INT_CST_LOW (arg0) & ~0x3)
15121 error ("argument 1 must be 0 or 2");
15122 return CONST0_RTX (tmode);
15125 else if (icode == CODE_FOR_dfp_denbcd_dd
15126 || icode == CODE_FOR_dfp_denbcd_td)
15128 /* Only allow 1-bit unsigned literals. */
15129 STRIP_NOPS (arg0);
15130 if (TREE_CODE (arg0) != INTEGER_CST
15131 || TREE_INT_CST_LOW (arg0) & ~0x1)
15133 error ("argument 1 must be a 1-bit unsigned literal");
15134 return CONST0_RTX (tmode);
15137 else if (icode == CODE_FOR_dfp_dscli_dd
15138 || icode == CODE_FOR_dfp_dscli_td
15139 || icode == CODE_FOR_dfp_dscri_dd
15140 || icode == CODE_FOR_dfp_dscri_td)
15142 /* Only allow 6-bit unsigned literals. */
15143 STRIP_NOPS (arg1);
15144 if (TREE_CODE (arg1) != INTEGER_CST
15145 || TREE_INT_CST_LOW (arg1) & ~0x3f)
15147 error ("argument 2 must be a 6-bit unsigned literal");
15148 return CONST0_RTX (tmode);
15151 else if (icode == CODE_FOR_crypto_vshasigmaw
15152 || icode == CODE_FOR_crypto_vshasigmad)
15154 /* Check whether the 2nd and 3rd arguments are integer constants in
15155 range, and prepare the arguments.  */
15156 STRIP_NOPS (arg1);
15157 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
15159 error ("argument 2 must be 0 or 1");
15160 return CONST0_RTX (tmode);
15163 STRIP_NOPS (arg2);
15164 if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
15166 error ("argument 3 must be in the range 0..15");
15167 return CONST0_RTX (tmode);
15171 if (target == 0
15172 || GET_MODE (target) != tmode
15173 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15174 target = gen_reg_rtx (tmode);
15176 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15177 op0 = copy_to_mode_reg (mode0, op0);
15178 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15179 op1 = copy_to_mode_reg (mode1, op1);
15180 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15181 op2 = copy_to_mode_reg (mode2, op2);
15183 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
15184 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
15185 else
15186 pat = GEN_FCN (icode) (target, op0, op1, op2);
15187 if (! pat)
15188 return 0;
15189 emit_insn (pat);
15191 return target;
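/* Editorial sketch (not part of the original file): the literal-range
   checks above are user-visible.  For instance, the third operand of
   vec_sld must be a 4-bit unsigned literal.  Hedged example; the
   function name is invented and assumes -maltivec.  */
#if 0
#include <altivec.h>

vector signed int
example_sld (vector signed int a, vector signed int b)
{
  /* OK: 3 is a compile-time constant in 0..15.  A variable or
     out-of-range value here triggers the error emitted above.  */
  return vec_sld (a, b, 3);
}
#endif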
15194 /* Expand the lvx builtins. */
15195 static rtx
15196 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
15198 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15199 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15200 tree arg0;
15201 machine_mode tmode, mode0;
15202 rtx pat, op0;
15203 enum insn_code icode;
15205 switch (fcode)
15207 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
15208 icode = CODE_FOR_vector_altivec_load_v16qi;
15209 break;
15210 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
15211 icode = CODE_FOR_vector_altivec_load_v8hi;
15212 break;
15213 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
15214 icode = CODE_FOR_vector_altivec_load_v4si;
15215 break;
15216 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
15217 icode = CODE_FOR_vector_altivec_load_v4sf;
15218 break;
15219 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
15220 icode = CODE_FOR_vector_altivec_load_v2df;
15221 break;
15222 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
15223 icode = CODE_FOR_vector_altivec_load_v2di;
15224 break;
15225 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
15226 icode = CODE_FOR_vector_altivec_load_v1ti;
15227 break;
15228 default:
15229 *expandedp = false;
15230 return NULL_RTX;
15233 *expandedp = true;
15235 arg0 = CALL_EXPR_ARG (exp, 0);
15236 op0 = expand_normal (arg0);
15237 tmode = insn_data[icode].operand[0].mode;
15238 mode0 = insn_data[icode].operand[1].mode;
15240 if (target == 0
15241 || GET_MODE (target) != tmode
15242 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15243 target = gen_reg_rtx (tmode);
15245 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15246 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15248 pat = GEN_FCN (icode) (target, op0);
15249 if (! pat)
15250 return 0;
15251 emit_insn (pat);
15252 return target;
15255 /* Expand the stvx builtins. */
15256 static rtx
15257 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15258 bool *expandedp)
15260 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15261 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15262 tree arg0, arg1;
15263 machine_mode mode0, mode1;
15264 rtx pat, op0, op1;
15265 enum insn_code icode;
15267 switch (fcode)
15269 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
15270 icode = CODE_FOR_vector_altivec_store_v16qi;
15271 break;
15272 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
15273 icode = CODE_FOR_vector_altivec_store_v8hi;
15274 break;
15275 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
15276 icode = CODE_FOR_vector_altivec_store_v4si;
15277 break;
15278 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
15279 icode = CODE_FOR_vector_altivec_store_v4sf;
15280 break;
15281 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
15282 icode = CODE_FOR_vector_altivec_store_v2df;
15283 break;
15284 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
15285 icode = CODE_FOR_vector_altivec_store_v2di;
15286 break;
15287 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
15288 icode = CODE_FOR_vector_altivec_store_v1ti;
15289 break;
15290 default:
15291 *expandedp = false;
15292 return NULL_RTX;
15295 arg0 = CALL_EXPR_ARG (exp, 0);
15296 arg1 = CALL_EXPR_ARG (exp, 1);
15297 op0 = expand_normal (arg0);
15298 op1 = expand_normal (arg1);
15299 mode0 = insn_data[icode].operand[0].mode;
15300 mode1 = insn_data[icode].operand[1].mode;
15302 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15303 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15304 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15305 op1 = copy_to_mode_reg (mode1, op1);
15307 pat = GEN_FCN (icode) (op0, op1);
15308 if (pat)
15309 emit_insn (pat);
15311 *expandedp = true;
15312 return NULL_RTX;
15315 /* Expand the dst builtins. */
15316 static rtx
15317 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15318 bool *expandedp)
15320 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15321 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15322 tree arg0, arg1, arg2;
15323 machine_mode mode0, mode1;
15324 rtx pat, op0, op1, op2;
15325 const struct builtin_description *d;
15326 size_t i;
15328 *expandedp = false;
15330 /* Handle DST variants. */
15331 d = bdesc_dst;
15332 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
15333 if (d->code == fcode)
15335 arg0 = CALL_EXPR_ARG (exp, 0);
15336 arg1 = CALL_EXPR_ARG (exp, 1);
15337 arg2 = CALL_EXPR_ARG (exp, 2);
15338 op0 = expand_normal (arg0);
15339 op1 = expand_normal (arg1);
15340 op2 = expand_normal (arg2);
15341 mode0 = insn_data[d->icode].operand[0].mode;
15342 mode1 = insn_data[d->icode].operand[1].mode;
15344 /* Invalid arguments; bail out before generating bad rtl.  */
15345 if (arg0 == error_mark_node
15346 || arg1 == error_mark_node
15347 || arg2 == error_mark_node)
15348 return const0_rtx;
15350 *expandedp = true;
15351 STRIP_NOPS (arg2);
15352 if (TREE_CODE (arg2) != INTEGER_CST
15353 || TREE_INT_CST_LOW (arg2) & ~0x3)
15355 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
15356 return const0_rtx;
15359 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15360 op0 = copy_to_mode_reg (Pmode, op0);
15361 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15362 op1 = copy_to_mode_reg (mode1, op1);
15364 pat = GEN_FCN (d->icode) (op0, op1, op2);
15365 if (pat != 0)
15366 emit_insn (pat);
15368 return NULL_RTX;
15371 return NULL_RTX;
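/* Editorial sketch (not part of the original file): the 2-bit literal
   validated above is the tag operand of the data-stream-touch
   intrinsics.  Hedged example; names invented, assumes -maltivec.  */
#if 0
#include <altivec.h>

void
example_dst (const int *p, int ctl)
{
  /* CTL encodes block size, count and stride; the tag (last operand)
     must be a literal 0..3, as checked above.  */
  vec_dst (p, ctl, 0);
}
#endif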
15374 /* Expand vec_init builtin. */
15375 static rtx
15376 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
15378 machine_mode tmode = TYPE_MODE (type);
15379 machine_mode inner_mode = GET_MODE_INNER (tmode);
15380 int i, n_elt = GET_MODE_NUNITS (tmode);
15382 gcc_assert (VECTOR_MODE_P (tmode));
15383 gcc_assert (n_elt == call_expr_nargs (exp));
15385 if (!target || !register_operand (target, tmode))
15386 target = gen_reg_rtx (tmode);
15388 /* If we have a vector composed of a single element, such as V1TImode, do
15389 the initialization directly.  */
15390 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
15392 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
15393 emit_move_insn (target, gen_lowpart (tmode, x));
15395 else
15397 rtvec v = rtvec_alloc (n_elt);
15399 for (i = 0; i < n_elt; ++i)
15401 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
15402 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15405 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
15408 return target;
15411 /* Return the integer constant in ARG. Constrain it to be in the range
15412 of the subparts of VEC_TYPE; issue an error if not. */
15414 static int
15415 get_element_number (tree vec_type, tree arg)
15417 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15419 if (!tree_fits_uhwi_p (arg)
15420 || (elt = tree_to_uhwi (arg), elt > max))
15422 error ("selector must be an integer constant in the range 0..%wi", max);
15423 return 0;
15426 return elt;
15429 /* Expand vec_set builtin. */
15430 static rtx
15431 altivec_expand_vec_set_builtin (tree exp)
15433 machine_mode tmode, mode1;
15434 tree arg0, arg1, arg2;
15435 int elt;
15436 rtx op0, op1;
15438 arg0 = CALL_EXPR_ARG (exp, 0);
15439 arg1 = CALL_EXPR_ARG (exp, 1);
15440 arg2 = CALL_EXPR_ARG (exp, 2);
15442 tmode = TYPE_MODE (TREE_TYPE (arg0));
15443 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15444 gcc_assert (VECTOR_MODE_P (tmode));
15446 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
15447 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
15448 elt = get_element_number (TREE_TYPE (arg0), arg2);
15450 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15451 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15453 op0 = force_reg (tmode, op0);
15454 op1 = force_reg (mode1, op1);
15456 rs6000_expand_vector_set (op0, op1, elt);
15458 return op0;
15461 /* Expand vec_ext builtin. */
15462 static rtx
15463 altivec_expand_vec_ext_builtin (tree exp, rtx target)
15465 machine_mode tmode, mode0;
15466 tree arg0, arg1;
15467 rtx op0;
15468 rtx op1;
15470 arg0 = CALL_EXPR_ARG (exp, 0);
15471 arg1 = CALL_EXPR_ARG (exp, 1);
15473 op0 = expand_normal (arg0);
15474 op1 = expand_normal (arg1);
15476 /* Call get_element_number to validate arg1 if it is a constant. */
15477 if (TREE_CODE (arg1) == INTEGER_CST)
15478 (void) get_element_number (TREE_TYPE (arg0), arg1);
15480 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15481 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15482 gcc_assert (VECTOR_MODE_P (mode0));
15484 op0 = force_reg (mode0, op0);
15486 if (optimize || !target || !register_operand (target, tmode))
15487 target = gen_reg_rtx (tmode);
15489 rs6000_expand_vector_extract (target, op0, op1);
15491 return target;
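/* Editorial sketch (not part of the original file): the expanders above
   back vec_insert and vec_extract; a constant selector out of range is
   rejected by get_element_number.  Hedged example; the function name is
   invented and assumes -maltivec.  */
#if 0
#include <altivec.h>

int
example_lane_ops (vector signed int v)
{
  vector signed int w = vec_insert (42, v, 1);	/* Set element 1.  */
  return vec_extract (w, 1);			/* Read it back: 42.  */
}
#endif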
15494 /* Expand the builtin in EXP and store the result in TARGET. Store
15495 true in *EXPANDEDP if we found a builtin to expand. */
15496 static rtx
15497 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
15499 const struct builtin_description *d;
15500 size_t i;
15501 enum insn_code icode;
15502 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15503 tree arg0, arg1, arg2;
15504 rtx op0, pat;
15505 machine_mode tmode, mode0;
15506 enum rs6000_builtins fcode
15507 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15509 if (rs6000_overloaded_builtin_p (fcode))
15511 *expandedp = true;
15512 error ("unresolved overload for Altivec builtin %qF", fndecl);
15514 /* Given it is invalid, just generate a normal call. */
15515 return expand_call (exp, target, false);
15518 target = altivec_expand_ld_builtin (exp, target, expandedp);
15519 if (*expandedp)
15520 return target;
15522 target = altivec_expand_st_builtin (exp, target, expandedp);
15523 if (*expandedp)
15524 return target;
15526 target = altivec_expand_dst_builtin (exp, target, expandedp);
15527 if (*expandedp)
15528 return target;
15530 *expandedp = true;
15532 switch (fcode)
15534 case ALTIVEC_BUILTIN_STVX_V2DF:
15535 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
15536 case ALTIVEC_BUILTIN_STVX_V2DI:
15537 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
15538 case ALTIVEC_BUILTIN_STVX_V4SF:
15539 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
15540 case ALTIVEC_BUILTIN_STVX:
15541 case ALTIVEC_BUILTIN_STVX_V4SI:
15542 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
15543 case ALTIVEC_BUILTIN_STVX_V8HI:
15544 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
15545 case ALTIVEC_BUILTIN_STVX_V16QI:
15546 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
15547 case ALTIVEC_BUILTIN_STVEBX:
15548 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
15549 case ALTIVEC_BUILTIN_STVEHX:
15550 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
15551 case ALTIVEC_BUILTIN_STVEWX:
15552 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
15553 case ALTIVEC_BUILTIN_STVXL_V2DF:
15554 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
15555 case ALTIVEC_BUILTIN_STVXL_V2DI:
15556 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
15557 case ALTIVEC_BUILTIN_STVXL_V4SF:
15558 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
15559 case ALTIVEC_BUILTIN_STVXL:
15560 case ALTIVEC_BUILTIN_STVXL_V4SI:
15561 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
15562 case ALTIVEC_BUILTIN_STVXL_V8HI:
15563 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
15564 case ALTIVEC_BUILTIN_STVXL_V16QI:
15565 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
15567 case ALTIVEC_BUILTIN_STVLX:
15568 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
15569 case ALTIVEC_BUILTIN_STVLXL:
15570 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
15571 case ALTIVEC_BUILTIN_STVRX:
15572 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
15573 case ALTIVEC_BUILTIN_STVRXL:
15574 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
15576 case P9V_BUILTIN_STXVL:
15577 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
15579 case VSX_BUILTIN_STXVD2X_V1TI:
15580 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
15581 case VSX_BUILTIN_STXVD2X_V2DF:
15582 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
15583 case VSX_BUILTIN_STXVD2X_V2DI:
15584 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
15585 case VSX_BUILTIN_STXVW4X_V4SF:
15586 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
15587 case VSX_BUILTIN_STXVW4X_V4SI:
15588 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
15589 case VSX_BUILTIN_STXVW4X_V8HI:
15590 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
15591 case VSX_BUILTIN_STXVW4X_V16QI:
15592 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
15594 /* For the following on big endian, it's ok to use any appropriate
15595 unaligned-supporting store, so use a generic expander. For
15596 little-endian, the exact element-reversing instruction must
15597 be used. */
15598 case VSX_BUILTIN_ST_ELEMREV_V2DF:
15600 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
15601 : CODE_FOR_vsx_st_elemrev_v2df);
15602 return altivec_expand_stv_builtin (code, exp);
15604 case VSX_BUILTIN_ST_ELEMREV_V2DI:
15606 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
15607 : CODE_FOR_vsx_st_elemrev_v2di);
15608 return altivec_expand_stv_builtin (code, exp);
15610 case VSX_BUILTIN_ST_ELEMREV_V4SF:
15612 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
15613 : CODE_FOR_vsx_st_elemrev_v4sf);
15614 return altivec_expand_stv_builtin (code, exp);
15616 case VSX_BUILTIN_ST_ELEMREV_V4SI:
15618 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
15619 : CODE_FOR_vsx_st_elemrev_v4si);
15620 return altivec_expand_stv_builtin (code, exp);
15622 case VSX_BUILTIN_ST_ELEMREV_V8HI:
15624 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
15625 : CODE_FOR_vsx_st_elemrev_v8hi);
15626 return altivec_expand_stv_builtin (code, exp);
15628 case VSX_BUILTIN_ST_ELEMREV_V16QI:
15630 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
15631 : CODE_FOR_vsx_st_elemrev_v16qi);
15632 return altivec_expand_stv_builtin (code, exp);
15635 case ALTIVEC_BUILTIN_MFVSCR:
15636 icode = CODE_FOR_altivec_mfvscr;
15637 tmode = insn_data[icode].operand[0].mode;
15639 if (target == 0
15640 || GET_MODE (target) != tmode
15641 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15642 target = gen_reg_rtx (tmode);
15644 pat = GEN_FCN (icode) (target);
15645 if (! pat)
15646 return 0;
15647 emit_insn (pat);
15648 return target;
15650 case ALTIVEC_BUILTIN_MTVSCR:
15651 icode = CODE_FOR_altivec_mtvscr;
15652 arg0 = CALL_EXPR_ARG (exp, 0);
15653 op0 = expand_normal (arg0);
15654 mode0 = insn_data[icode].operand[0].mode;
15656 /* If we got invalid arguments, bail out before generating bad rtl.  */
15657 if (arg0 == error_mark_node)
15658 return const0_rtx;
15660 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15661 op0 = copy_to_mode_reg (mode0, op0);
15663 pat = GEN_FCN (icode) (op0);
15664 if (pat)
15665 emit_insn (pat);
15666 return NULL_RTX;
15668 case ALTIVEC_BUILTIN_DSSALL:
15669 emit_insn (gen_altivec_dssall ());
15670 return NULL_RTX;
15672 case ALTIVEC_BUILTIN_DSS:
15673 icode = CODE_FOR_altivec_dss;
15674 arg0 = CALL_EXPR_ARG (exp, 0);
15675 STRIP_NOPS (arg0);
15676 op0 = expand_normal (arg0);
15677 mode0 = insn_data[icode].operand[0].mode;
15679 /* If we got invalid arguments, bail out before generating bad rtl.  */
15680 if (arg0 == error_mark_node)
15681 return const0_rtx;
15683 if (TREE_CODE (arg0) != INTEGER_CST
15684 || TREE_INT_CST_LOW (arg0) & ~0x3)
15686 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15687 return const0_rtx;
15690 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15691 op0 = copy_to_mode_reg (mode0, op0);
15693 emit_insn (gen_altivec_dss (op0));
15694 return NULL_RTX;
15696 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
15697 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
15698 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
15699 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
15700 case VSX_BUILTIN_VEC_INIT_V2DF:
15701 case VSX_BUILTIN_VEC_INIT_V2DI:
15702 case VSX_BUILTIN_VEC_INIT_V1TI:
15703 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
15705 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15706 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15707 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15708 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15709 case VSX_BUILTIN_VEC_SET_V2DF:
15710 case VSX_BUILTIN_VEC_SET_V2DI:
15711 case VSX_BUILTIN_VEC_SET_V1TI:
15712 return altivec_expand_vec_set_builtin (exp);
15714 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15715 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15716 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15717 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15718 case VSX_BUILTIN_VEC_EXT_V2DF:
15719 case VSX_BUILTIN_VEC_EXT_V2DI:
15720 case VSX_BUILTIN_VEC_EXT_V1TI:
15721 return altivec_expand_vec_ext_builtin (exp, target);
15723 case P9V_BUILTIN_VEXTRACT4B:
15724 case P9V_BUILTIN_VEC_VEXTRACT4B:
15725 arg1 = CALL_EXPR_ARG (exp, 1);
15726 STRIP_NOPS (arg1);
15728 /* Generate a normal call if it is invalid. */
15729 if (arg1 == error_mark_node)
15730 return expand_call (exp, target, false);
15732 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15734 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15735 return expand_call (exp, target, false);
15737 break;
15739 case P9V_BUILTIN_VINSERT4B:
15740 case P9V_BUILTIN_VINSERT4B_DI:
15741 case P9V_BUILTIN_VEC_VINSERT4B:
15742 arg2 = CALL_EXPR_ARG (exp, 2);
15743 STRIP_NOPS (arg2);
15745 /* Generate a normal call if it is invalid. */
15746 if (arg2 == error_mark_node)
15747 return expand_call (exp, target, false);
15749 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15751 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15752 return expand_call (exp, target, false);
15754 break;
15756 default:
15757 /* Fall through to the generic handling below.  */
15758 break;
15761 /* Expand abs* operations. */
15762 d = bdesc_abs;
15763 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15764 if (d->code == fcode)
15765 return altivec_expand_abs_builtin (d->icode, exp, target);
15767 /* Expand the AltiVec predicates. */
15768 d = bdesc_altivec_preds;
15769 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15770 if (d->code == fcode)
15771 return altivec_expand_predicate_builtin (d->icode, exp, target);
15773 /* The LV* builtins were initialized differently from the others, so handle them separately here.  */
15774 switch (fcode)
15776 case ALTIVEC_BUILTIN_LVSL:
15777 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15778 exp, target, false);
15779 case ALTIVEC_BUILTIN_LVSR:
15780 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15781 exp, target, false);
15782 case ALTIVEC_BUILTIN_LVEBX:
15783 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15784 exp, target, false);
15785 case ALTIVEC_BUILTIN_LVEHX:
15786 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15787 exp, target, false);
15788 case ALTIVEC_BUILTIN_LVEWX:
15789 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15790 exp, target, false);
15791 case ALTIVEC_BUILTIN_LVXL_V2DF:
15792 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15793 exp, target, false);
15794 case ALTIVEC_BUILTIN_LVXL_V2DI:
15795 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15796 exp, target, false);
15797 case ALTIVEC_BUILTIN_LVXL_V4SF:
15798 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15799 exp, target, false);
15800 case ALTIVEC_BUILTIN_LVXL:
15801 case ALTIVEC_BUILTIN_LVXL_V4SI:
15802 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15803 exp, target, false);
15804 case ALTIVEC_BUILTIN_LVXL_V8HI:
15805 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15806 exp, target, false);
15807 case ALTIVEC_BUILTIN_LVXL_V16QI:
15808 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15809 exp, target, false);
15810 case ALTIVEC_BUILTIN_LVX_V2DF:
15811 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
15812 exp, target, false);
15813 case ALTIVEC_BUILTIN_LVX_V2DI:
15814 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
15815 exp, target, false);
15816 case ALTIVEC_BUILTIN_LVX_V4SF:
15817 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
15818 exp, target, false);
15819 case ALTIVEC_BUILTIN_LVX:
15820 case ALTIVEC_BUILTIN_LVX_V4SI:
15821 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
15822 exp, target, false);
15823 case ALTIVEC_BUILTIN_LVX_V8HI:
15824 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
15825 exp, target, false);
15826 case ALTIVEC_BUILTIN_LVX_V16QI:
15827 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
15828 exp, target, false);
15829 case ALTIVEC_BUILTIN_LVLX:
15830 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15831 exp, target, true);
15832 case ALTIVEC_BUILTIN_LVLXL:
15833 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15834 exp, target, true);
15835 case ALTIVEC_BUILTIN_LVRX:
15836 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15837 exp, target, true);
15838 case ALTIVEC_BUILTIN_LVRXL:
15839 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15840 exp, target, true);
15841 case VSX_BUILTIN_LXVD2X_V1TI:
15842 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15843 exp, target, false);
15844 case VSX_BUILTIN_LXVD2X_V2DF:
15845 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15846 exp, target, false);
15847 case VSX_BUILTIN_LXVD2X_V2DI:
15848 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15849 exp, target, false);
15850 case VSX_BUILTIN_LXVW4X_V4SF:
15851 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15852 exp, target, false);
15853 case VSX_BUILTIN_LXVW4X_V4SI:
15854 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15855 exp, target, false);
15856 case VSX_BUILTIN_LXVW4X_V8HI:
15857 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15858 exp, target, false);
15859 case VSX_BUILTIN_LXVW4X_V16QI:
15860 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15861 exp, target, false);
15862 /* For the following on big endian, it's ok to use any appropriate
15863 unaligned-supporting load, so use a generic expander. For
15864 little-endian, the exact element-reversing instruction must
15865 be used. */
15866 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15868 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15869 : CODE_FOR_vsx_ld_elemrev_v2df);
15870 return altivec_expand_lv_builtin (code, exp, target, false);
15872 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15874 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15875 : CODE_FOR_vsx_ld_elemrev_v2di);
15876 return altivec_expand_lv_builtin (code, exp, target, false);
15878 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15880 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15881 : CODE_FOR_vsx_ld_elemrev_v4sf);
15882 return altivec_expand_lv_builtin (code, exp, target, false);
15884 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15886 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15887 : CODE_FOR_vsx_ld_elemrev_v4si);
15888 return altivec_expand_lv_builtin (code, exp, target, false);
15890 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15892 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15893 : CODE_FOR_vsx_ld_elemrev_v8hi);
15894 return altivec_expand_lv_builtin (code, exp, target, false);
15896 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15898 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15899 : CODE_FOR_vsx_ld_elemrev_v16qi);
15900 return altivec_expand_lv_builtin (code, exp, target, false);
15902 break;
15903 default:
15904 /* Fall through to the XL_BE handling below.  */
15905 break;
15908 /* The XL_BE builtins are initialized to always load in big-endian element order.  */
15909 switch (fcode)
15911 case VSX_BUILTIN_XL_BE_V2DI:
15913 enum insn_code code = CODE_FOR_vsx_load_v2di;
15914 return altivec_expand_xl_be_builtin (code, exp, target, false);
15916 break;
15917 case VSX_BUILTIN_XL_BE_V4SI:
15919 enum insn_code code = CODE_FOR_vsx_load_v4si;
15920 return altivec_expand_xl_be_builtin (code, exp, target, false);
15922 break;
15923 case VSX_BUILTIN_XL_BE_V8HI:
15925 enum insn_code code = CODE_FOR_vsx_load_v8hi;
15926 return altivec_expand_xl_be_builtin (code, exp, target, false);
15928 break;
15929 case VSX_BUILTIN_XL_BE_V16QI:
15931 enum insn_code code = CODE_FOR_vsx_load_v16qi;
15932 return altivec_expand_xl_be_builtin (code, exp, target, false);
15934 break;
15935 case VSX_BUILTIN_XL_BE_V2DF:
15937 enum insn_code code = CODE_FOR_vsx_load_v2df;
15938 return altivec_expand_xl_be_builtin (code, exp, target, false);
15940 break;
15941 case VSX_BUILTIN_XL_BE_V4SF:
15943 enum insn_code code = CODE_FOR_vsx_load_v4sf;
15944 return altivec_expand_xl_be_builtin (code, exp, target, false);
15946 break;
15947 default:
15948 /* Fall through to the failure return below.  */
15949 break;
15952 *expandedp = false;
15953 return NULL_RTX;
15956 /* Expand the builtin in EXP and store the result in TARGET. Store
15957 true in *EXPANDEDP if we found a builtin to expand. */
15958 static rtx
15959 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
15961 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15962 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15963 const struct builtin_description *d;
15964 size_t i;
15966 *expandedp = true;
15968 switch (fcode)
15970 case PAIRED_BUILTIN_STX:
15971 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
15972 case PAIRED_BUILTIN_LX:
15973 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
15974 default:
15975 /* Fall through to the paired predicates below.  */
15976 break;
15979 /* Expand the paired predicates. */
15980 d = bdesc_paired_preds;
15981 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
15982 if (d->code == fcode)
15983 return paired_expand_predicate_builtin (d->icode, exp, target);
15985 *expandedp = false;
15986 return NULL_RTX;
15989 static rtx
15990 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
15992 rtx pat, scratch, tmp;
15993 tree form = CALL_EXPR_ARG (exp, 0);
15994 tree arg0 = CALL_EXPR_ARG (exp, 1);
15995 tree arg1 = CALL_EXPR_ARG (exp, 2);
15996 rtx op0 = expand_normal (arg0);
15997 rtx op1 = expand_normal (arg1);
15998 machine_mode mode0 = insn_data[icode].operand[1].mode;
15999 machine_mode mode1 = insn_data[icode].operand[2].mode;
16000 int form_int;
16001 enum rtx_code code;
16003 if (TREE_CODE (form) != INTEGER_CST)
16005 error ("argument 1 of %s must be a constant",
16006 "__builtin_paired_predicate");
16007 return const0_rtx;
16009 else
16010 form_int = TREE_INT_CST_LOW (form);
16012 gcc_assert (mode0 == mode1);
16014 if (arg0 == error_mark_node || arg1 == error_mark_node)
16015 return const0_rtx;
16017 if (target == 0
16018 || GET_MODE (target) != SImode
16019 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
16020 target = gen_reg_rtx (SImode);
16021 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
16022 op0 = copy_to_mode_reg (mode0, op0);
16023 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
16024 op1 = copy_to_mode_reg (mode1, op1);
16026 scratch = gen_reg_rtx (CCFPmode);
16028 pat = GEN_FCN (icode) (scratch, op0, op1);
16029 if (!pat)
16030 return const0_rtx;
16032 emit_insn (pat);
16034 switch (form_int)
16036 /* LT bit. */
16037 case 0:
16038 code = LT;
16039 break;
16040 /* GT bit. */
16041 case 1:
16042 code = GT;
16043 break;
16044 /* EQ bit. */
16045 case 2:
16046 code = EQ;
16047 break;
16048 /* UN bit. */
16049 case 3:
16050 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
16051 return target;
16052 default:
16053 error ("argument 1 of %qs is out of range",
16054 "__builtin_paired_predicate");
16055 return const0_rtx;
16058 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
16059 emit_move_insn (target, tmp);
16060 return target;
16063 /* Issue an error message for a builtin function that is called without the
16064 appropriate target options being set.  */
16066 static void
16067 rs6000_invalid_builtin (enum rs6000_builtins fncode)
16069 size_t uns_fncode = (size_t) fncode;
16070 const char *name = rs6000_builtin_info[uns_fncode].name;
16071 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
16073 gcc_assert (name != NULL);
16074 if ((fnmask & RS6000_BTM_CELL) != 0)
16075 error ("builtin function %qs is only valid for the cell processor", name);
16076 else if ((fnmask & RS6000_BTM_VSX) != 0)
16077 error ("builtin function %qs requires the %qs option", name, "-mvsx");
16078 else if ((fnmask & RS6000_BTM_HTM) != 0)
16079 error ("builtin function %qs requires the %qs option", name, "-mhtm");
16080 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
16081 error ("builtin function %qs requires the %qs option", name, "-maltivec");
16082 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
16083 error ("builtin function %qs requires the %qs option", name, "-mpaired");
16084 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16085 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16086 error ("builtin function %qs requires the %qs and %qs options",
16087 name, "-mhard-dfp", "-mpower8-vector");
16088 else if ((fnmask & RS6000_BTM_DFP) != 0)
16089 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
16090 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
16091 error ("builtin function %qs requires the %qs option", name,
16092 "-mpower8-vector");
16093 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16094 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16095 error ("builtin function %qs requires the %qs and %qs options",
16096 name, "-mcpu=power9", "-m64");
16097 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
16098 error ("builtin function %qs requires the %qs option", name,
16099 "-mcpu=power9");
16100 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16101 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16102 error ("builtin function %qs requires the %qs and %qs options",
16103 name, "-mcpu=power9", "-m64");
16104 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
16105 error ("builtin function %qs requires the %qs option", name,
16106 "-mcpu=power9");
16107 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16108 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16109 error ("builtin function %qs requires the %qs and %qs options",
16110 name, "-mhard-float", "-mlong-double-128");
16111 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
16112 error ("builtin function %qs requires the %qs option", name,
16113 "-mhard-float");
16114 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
16115 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
16116 else
16117 error ("builtin function %qs is not supported with the current options",
16118 name);
16121 /* Target hook for early folding of built-ins, shamelessly stolen
16122 from ia64.c. */
16124 static tree
16125 rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
16126 tree *args, bool ignore ATTRIBUTE_UNUSED)
16128 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
16130 enum rs6000_builtins fn_code
16131 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16132 switch (fn_code)
16134 case RS6000_BUILTIN_NANQ:
16135 case RS6000_BUILTIN_NANSQ:
16137 tree type = TREE_TYPE (TREE_TYPE (fndecl));
16138 const char *str = c_getstr (*args);
16139 int quiet = fn_code == RS6000_BUILTIN_NANQ;
16140 REAL_VALUE_TYPE real;
16142 if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
16143 return build_real (type, real);
16144 return NULL_TREE;
16146 case RS6000_BUILTIN_INFQ:
16147 case RS6000_BUILTIN_HUGE_VALQ:
16149 tree type = TREE_TYPE (TREE_TYPE (fndecl));
16150 REAL_VALUE_TYPE inf;
16151 real_inf (&inf);
16152 return build_real (type, inf);
16154 default:
16155 break;
16158 #ifdef SUBTARGET_FOLD_BUILTIN
16159 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
16160 #else
16161 return NULL_TREE;
16162 #endif
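/* Editorial sketch (not part of the original file): the effect of the
   folder above is that these quad-float builtins become compile-time
   constants, so they are usable in static initializers.  Hedged example;
   assumes __float128 support (-mfloat128).  */
#if 0
__float128 pos_inf = __builtin_infq ();	/* Folded via real_inf.  */
__float128 qnan = __builtin_nanq ("");	/* Folded via real_nan.  */
#endif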
16165 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
16166 a constant, use rs6000_fold_builtin.) */
16168 bool
16169 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
16171 gimple *stmt = gsi_stmt (*gsi);
16172 tree fndecl = gimple_call_fndecl (stmt);
16173 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
16174 enum rs6000_builtins fn_code
16175 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16176 tree arg0, arg1, lhs;
16178 size_t uns_fncode = (size_t) fn_code;
16179 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
16180 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
16181 const char *fn_name2 = (icode != CODE_FOR_nothing)
16182 ? get_insn_name ((int) icode)
16183 : "nothing";
16185 if (TARGET_DEBUG_BUILTIN)
16186 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
16187 fn_code, fn_name1, fn_name2);
16189 if (!rs6000_fold_gimple)
16190 return false;
16192 /* Generic solution to prevent gimple folding of code without an LHS.  */
16193 if (!gimple_call_lhs (stmt))
16194 return false;
16196 switch (fn_code)
16198 /* Flavors of vec_add. We deliberately don't expand
16199 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
16200 TImode, resulting in much poorer code generation. */
16201 case ALTIVEC_BUILTIN_VADDUBM:
16202 case ALTIVEC_BUILTIN_VADDUHM:
16203 case ALTIVEC_BUILTIN_VADDUWM:
16204 case P8V_BUILTIN_VADDUDM:
16205 case ALTIVEC_BUILTIN_VADDFP:
16206 case VSX_BUILTIN_XVADDDP:
16208 arg0 = gimple_call_arg (stmt, 0);
16209 arg1 = gimple_call_arg (stmt, 1);
16210 lhs = gimple_call_lhs (stmt);
16211 gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
16212 gimple_set_location (g, gimple_location (stmt));
16213 gsi_replace (gsi, g, true);
16214 return true;
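/* Editorial note: the replacement above turns, for example,
     vec_add (a, b)   with a, b of type vector int
   into the GIMPLE assignment  lhs = a + b;  so later passes can
   treat it like any other vector addition.  */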
16216 /* Flavors of vec_sub. We deliberately don't expand
16217 P8V_BUILTIN_VSUBUQM. */
16218 case ALTIVEC_BUILTIN_VSUBUBM:
16219 case ALTIVEC_BUILTIN_VSUBUHM:
16220 case ALTIVEC_BUILTIN_VSUBUWM:
16221 case P8V_BUILTIN_VSUBUDM:
16222 case ALTIVEC_BUILTIN_VSUBFP:
16223 case VSX_BUILTIN_XVSUBDP:
16225 arg0 = gimple_call_arg (stmt, 0);
16226 arg1 = gimple_call_arg (stmt, 1);
16227 lhs = gimple_call_lhs (stmt);
16228 gimple *g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
16229 gimple_set_location (g, gimple_location (stmt));
16230 gsi_replace (gsi, g, true);
16231 return true;
16233 case VSX_BUILTIN_XVMULSP:
16234 case VSX_BUILTIN_XVMULDP:
16236 arg0 = gimple_call_arg (stmt, 0);
16237 arg1 = gimple_call_arg (stmt, 1);
16238 lhs = gimple_call_lhs (stmt);
16239 gimple *g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
16240 gimple_set_location (g, gimple_location (stmt));
16241 gsi_replace (gsi, g, true);
16242 return true;
16244 /* Even element flavors of vec_mule (signed).  */
16245 case ALTIVEC_BUILTIN_VMULESB:
16246 case ALTIVEC_BUILTIN_VMULESH:
16247 case ALTIVEC_BUILTIN_VMULESW:
16248 /* Even element flavors of vec_mule (unsigned).  */
16249 case ALTIVEC_BUILTIN_VMULEUB:
16250 case ALTIVEC_BUILTIN_VMULEUH:
16251 case ALTIVEC_BUILTIN_VMULEUW:
16253 arg0 = gimple_call_arg (stmt, 0);
16254 arg1 = gimple_call_arg (stmt, 1);
16255 lhs = gimple_call_lhs (stmt);
16256 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
16257 gimple_set_location (g, gimple_location (stmt));
16258 gsi_replace (gsi, g, true);
16259 return true;
16261 /* Odd element flavors of vec_mulo (signed).  */
16262 case ALTIVEC_BUILTIN_VMULOSB:
16263 case ALTIVEC_BUILTIN_VMULOSH:
16264 case ALTIVEC_BUILTIN_VMULOSW:
16265 /* Odd element flavors of vec_mulo (unsigned).  */
16266 case ALTIVEC_BUILTIN_VMULOUB:
16267 case ALTIVEC_BUILTIN_VMULOUH:
16268 case ALTIVEC_BUILTIN_VMULOUW:
16270 arg0 = gimple_call_arg (stmt, 0);
16271 arg1 = gimple_call_arg (stmt, 1);
16272 lhs = gimple_call_lhs (stmt);
16273 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
16274 gimple_set_location (g, gimple_location (stmt));
16275 gsi_replace (gsi, g, true);
16276 return true;
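/* The even/odd multiplies pair up alternating elements and produce
   double-width results, hence the VEC_WIDEN_MULT_{EVEN,ODD}_EXPR tree
   codes: for example, vec_mule on two V8HI (8 x 16-bit) operands
   yields a V4SI (4 x 32-bit) result.  */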
16278 /* Flavors of vec_div (Integer). */
16279 case VSX_BUILTIN_DIV_V2DI:
16280 case VSX_BUILTIN_UDIV_V2DI:
16282 arg0 = gimple_call_arg (stmt, 0);
16283 arg1 = gimple_call_arg (stmt, 1);
16284 lhs = gimple_call_lhs (stmt);
16285 gimple *g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
16286 gimple_set_location (g, gimple_location (stmt));
16287 gsi_replace (gsi, g, true);
16288 return true;
16290 /* Flavors of vec_div (Float). */
16291 case VSX_BUILTIN_XVDIVSP:
16292 case VSX_BUILTIN_XVDIVDP:
16294 arg0 = gimple_call_arg (stmt, 0);
16295 arg1 = gimple_call_arg (stmt, 1);
16296 lhs = gimple_call_lhs (stmt);
16297 gimple *g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
16298 gimple_set_location (g, gimple_location (stmt));
16299 gsi_replace (gsi, g, true);
16300 return true;
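/* Note the tree-code split between the two vec_div groups: the
   integer flavors above fold to TRUNC_DIV_EXPR (truncating division,
   as in C), while these float flavors fold to RDIV_EXPR, GIMPLE's
   real division, so vec_div on two V2DF operands simply becomes
   "lhs = a / b".  */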
16302 /* Flavors of vec_and. */
16303 case ALTIVEC_BUILTIN_VAND:
16305 arg0 = gimple_call_arg (stmt, 0);
16306 arg1 = gimple_call_arg (stmt, 1);
16307 lhs = gimple_call_lhs (stmt);
16308 gimple *g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
16309 gimple_set_location (g, gimple_location (stmt));
16310 gsi_replace (gsi, g, true);
16311 return true;
16313 /* Flavors of vec_andc. */
16314 case ALTIVEC_BUILTIN_VANDC:
16316 arg0 = gimple_call_arg (stmt, 0);
16317 arg1 = gimple_call_arg (stmt, 1);
16318 lhs = gimple_call_lhs (stmt);
16319 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16320 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16321 gimple_set_location (g, gimple_location (stmt));
16322 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16323 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
16324 gimple_set_location (g, gimple_location (stmt));
16325 gsi_replace (gsi, g, true);
16326 return true;
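/* A minimal sketch of the two-statement lowering used here and for
   the other negated bit operations below: the call

     lhs = __builtin_altivec_vandc (a, b);

   becomes the GIMPLE pair

     temp = ~b;
     lhs = a & temp;

   where the first statement is inserted before the call and the
   second one replaces it.  */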
16328 /* Flavors of vec_nand. */
16329 case P8V_BUILTIN_VEC_NAND:
16330 case P8V_BUILTIN_NAND_V16QI:
16331 case P8V_BUILTIN_NAND_V8HI:
16332 case P8V_BUILTIN_NAND_V4SI:
16333 case P8V_BUILTIN_NAND_V4SF:
16334 case P8V_BUILTIN_NAND_V2DF:
16335 case P8V_BUILTIN_NAND_V2DI:
16337 arg0 = gimple_call_arg (stmt, 0);
16338 arg1 = gimple_call_arg (stmt, 1);
16339 lhs = gimple_call_lhs (stmt);
16340 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16341 gimple *g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
16342 gimple_set_location (g, gimple_location (stmt));
16343 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16344 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16345 gimple_set_location (g, gimple_location (stmt));
16346 gsi_replace (gsi, g, true);
16347 return true;
16349 /* Flavors of vec_or. */
16350 case ALTIVEC_BUILTIN_VOR:
16352 arg0 = gimple_call_arg (stmt, 0);
16353 arg1 = gimple_call_arg (stmt, 1);
16354 lhs = gimple_call_lhs (stmt);
16355 gimple *g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
16356 gimple_set_location (g, gimple_location (stmt));
16357 gsi_replace (gsi, g, true);
16358 return true;
16360 /* Flavors of vec_orc. */
16361 case P8V_BUILTIN_ORC_V16QI:
16362 case P8V_BUILTIN_ORC_V8HI:
16363 case P8V_BUILTIN_ORC_V4SI:
16364 case P8V_BUILTIN_ORC_V4SF:
16365 case P8V_BUILTIN_ORC_V2DF:
16366 case P8V_BUILTIN_ORC_V2DI:
16368 arg0 = gimple_call_arg (stmt, 0);
16369 arg1 = gimple_call_arg (stmt, 1);
16370 lhs = gimple_call_lhs (stmt);
16371 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16372 gimple *g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
16373 gimple_set_location (g, gimple_location (stmt));
16374 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16375 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
16376 gimple_set_location (g, gimple_location (stmt));
16377 gsi_replace (gsi, g, true);
16378 return true;
16380 /* Flavors of vec_xor. */
16381 case ALTIVEC_BUILTIN_VXOR:
16383 arg0 = gimple_call_arg (stmt, 0);
16384 arg1 = gimple_call_arg (stmt, 1);
16385 lhs = gimple_call_lhs (stmt);
16386 gimple *g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
16387 gimple_set_location (g, gimple_location (stmt));
16388 gsi_replace (gsi, g, true);
16389 return true;
16391 /* Flavors of vec_nor. */
16392 case ALTIVEC_BUILTIN_VNOR:
16394 arg0 = gimple_call_arg (stmt, 0);
16395 arg1 = gimple_call_arg (stmt, 1);
16396 lhs = gimple_call_lhs (stmt);
16397 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16398 gimple *g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
16399 gimple_set_location (g, gimple_location (stmt));
16400 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16401 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16402 gimple_set_location (g, gimple_location (stmt));
16403 gsi_replace (gsi, g, true);
16404 return true;
16406 /* Flavors of vec_abs. */
16407 case ALTIVEC_BUILTIN_ABS_V16QI:
16408 case ALTIVEC_BUILTIN_ABS_V8HI:
16409 case ALTIVEC_BUILTIN_ABS_V4SI:
16410 case ALTIVEC_BUILTIN_ABS_V4SF:
16411 case P8V_BUILTIN_ABS_V2DI:
16412 case VSX_BUILTIN_XVABSDP:
16414 arg0 = gimple_call_arg (stmt, 0);
16415 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16416 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16417 return false;
16418 lhs = gimple_call_lhs (stmt);
16419 gimple *g = gimple_build_assign (lhs, ABS_EXPR, arg0);
16420 gimple_set_location (g, gimple_location (stmt));
16421 gsi_replace (gsi, g, true);
16422 return true;
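/* Note on the TYPE_OVERFLOW_WRAPS guard above: for signed integer
   vectors, ABS_EXPR of the most negative element overflows, and
   GIMPLE treats signed overflow as undefined unless the type wraps
   (e.g. under -fwrapv).  The hardware instructions behind vec_abs
   have defined modulo behaviour, so we only fold when the tree type
   gives the same guarantee, and otherwise keep the builtin call.  */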
16424 /* Flavors of vec_min. */
16425 case VSX_BUILTIN_XVMINDP:
16426 case P8V_BUILTIN_VMINSD:
16427 case P8V_BUILTIN_VMINUD:
16428 case ALTIVEC_BUILTIN_VMINSB:
16429 case ALTIVEC_BUILTIN_VMINSH:
16430 case ALTIVEC_BUILTIN_VMINSW:
16431 case ALTIVEC_BUILTIN_VMINUB:
16432 case ALTIVEC_BUILTIN_VMINUH:
16433 case ALTIVEC_BUILTIN_VMINUW:
16434 case ALTIVEC_BUILTIN_VMINFP:
16436 arg0 = gimple_call_arg (stmt, 0);
16437 arg1 = gimple_call_arg (stmt, 1);
16438 lhs = gimple_call_lhs (stmt);
16439 gimple *g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
16440 gimple_set_location (g, gimple_location (stmt));
16441 gsi_replace (gsi, g, true);
16442 return true;
16444 /* Flavors of vec_max. */
16445 case VSX_BUILTIN_XVMAXDP:
16446 case P8V_BUILTIN_VMAXSD:
16447 case P8V_BUILTIN_VMAXUD:
16448 case ALTIVEC_BUILTIN_VMAXSB:
16449 case ALTIVEC_BUILTIN_VMAXSH:
16450 case ALTIVEC_BUILTIN_VMAXSW:
16451 case ALTIVEC_BUILTIN_VMAXUB:
16452 case ALTIVEC_BUILTIN_VMAXUH:
16453 case ALTIVEC_BUILTIN_VMAXUW:
16454 case ALTIVEC_BUILTIN_VMAXFP:
16456 arg0 = gimple_call_arg (stmt, 0);
16457 arg1 = gimple_call_arg (stmt, 1);
16458 lhs = gimple_call_lhs (stmt);
16459 gimple *g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
16460 gimple_set_location (g, gimple_location (stmt));
16461 gsi_replace (gsi, g, true);
16462 return true;
16464 /* Flavors of vec_eqv. */
16465 case P8V_BUILTIN_EQV_V16QI:
16466 case P8V_BUILTIN_EQV_V8HI:
16467 case P8V_BUILTIN_EQV_V4SI:
16468 case P8V_BUILTIN_EQV_V4SF:
16469 case P8V_BUILTIN_EQV_V2DF:
16470 case P8V_BUILTIN_EQV_V2DI:
16472 arg0 = gimple_call_arg (stmt, 0);
16473 arg1 = gimple_call_arg (stmt, 1);
16474 lhs = gimple_call_lhs (stmt);
16475 tree temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
16476 gimple *g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
16477 gimple_set_location (g, gimple_location (stmt));
16478 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16479 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
16480 gimple_set_location (g, gimple_location (stmt));
16481 gsi_replace (gsi, g, true);
16482 return true;
16484 /* Flavors of vec_rotate_left. */
16485 case ALTIVEC_BUILTIN_VRLB:
16486 case ALTIVEC_BUILTIN_VRLH:
16487 case ALTIVEC_BUILTIN_VRLW:
16488 case P8V_BUILTIN_VRLD:
16490 arg0 = gimple_call_arg (stmt, 0);
16491 arg1 = gimple_call_arg (stmt, 1);
16492 lhs = gimple_call_lhs (stmt);
16493 gimple *g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
16494 gimple_set_location (g, gimple_location (stmt));
16495 gsi_replace (gsi, g, true);
16496 return true;
16498 /* Flavors of vector shift right algebraic.
16499 vec_sra{b,h,w} -> vsra{b,h,w}. */
16500 case ALTIVEC_BUILTIN_VSRAB:
16501 case ALTIVEC_BUILTIN_VSRAH:
16502 case ALTIVEC_BUILTIN_VSRAW:
16503 case P8V_BUILTIN_VSRAD:
16505 arg0 = gimple_call_arg (stmt, 0);
16506 arg1 = gimple_call_arg (stmt, 1);
16507 lhs = gimple_call_lhs (stmt);
16508 gimple *g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
16509 gimple_set_location (g, gimple_location (stmt));
16510 gsi_replace (gsi, g, true);
16511 return true;
16513 /* Flavors of vector shift left.
16514 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
16515 case ALTIVEC_BUILTIN_VSLB:
16516 case ALTIVEC_BUILTIN_VSLH:
16517 case ALTIVEC_BUILTIN_VSLW:
16518 case P8V_BUILTIN_VSLD:
16520 arg0 = gimple_call_arg (stmt, 0);
16521 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
16522 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
16523 return false;
16524 arg1 = gimple_call_arg (stmt, 1);
16525 lhs = gimple_call_lhs (stmt);
16526 gimple *g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
16527 gimple_set_location (g, gimple_location (stmt));
16528 gsi_replace (gsi, g, true);
16529 return true;
16531 /* Flavors of vector shift right. */
16532 case ALTIVEC_BUILTIN_VSRB:
16533 case ALTIVEC_BUILTIN_VSRH:
16534 case ALTIVEC_BUILTIN_VSRW:
16535 case P8V_BUILTIN_VSRD:
16537 arg0 = gimple_call_arg (stmt, 0);
16538 arg1 = gimple_call_arg (stmt, 1);
16539 lhs = gimple_call_lhs (stmt);
16540 gimple_seq stmts = NULL;
16541 /* Convert arg0 to unsigned. */
16542 tree arg0_unsigned
16543 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
16544 unsigned_type_for (TREE_TYPE (arg0)), arg0);
16545 tree res
16546 = gimple_build (&stmts, RSHIFT_EXPR,
16547 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
16548 /* Convert result back to the lhs type. */
16549 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
16550 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16551 update_call_from_tree (gsi, res);
16552 return true;
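/* Sketch of the conversion dance above: RSHIFT_EXPR on a signed
   element type would be an arithmetic shift, but vsr{b,h,w,d} are
   logical shifts.  For a "vector int" operand we therefore emit, in
   effect,

     u   = VIEW_CONVERT <vector unsigned int> (a);
     res = u >> b;
     lhs = VIEW_CONVERT <vector int> (res);

   where the unsigned element type forces a zero-extending shift.  */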
16554 default:
16555 if (TARGET_DEBUG_BUILTIN)
16556 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16557 fn_code, fn_name1, fn_name2);
16558 break;
16561 return false;
16564 /* Expand an expression EXP that calls a built-in function,
16565 with result going to TARGET if that's convenient
16566 (and in mode MODE if that's convenient).
16567 SUBTARGET may be used as the target for computing one of EXP's operands.
16568 IGNORE is nonzero if the value is to be ignored. */
16570 static rtx
16571 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16572 machine_mode mode ATTRIBUTE_UNUSED,
16573 int ignore ATTRIBUTE_UNUSED)
16575 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16576 enum rs6000_builtins fcode
16577 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16578 size_t uns_fcode = (size_t)fcode;
16579 const struct builtin_description *d;
16580 size_t i;
16581 rtx ret;
16582 bool success;
16583 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16584 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16586 if (TARGET_DEBUG_BUILTIN)
16588 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16589 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16590 const char *name2 = (icode != CODE_FOR_nothing)
16591 ? get_insn_name ((int) icode)
16592 : "nothing";
16593 const char *name3;
16595 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16597 default: name3 = "unknown"; break;
16598 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16599 case RS6000_BTC_UNARY: name3 = "unary"; break;
16600 case RS6000_BTC_BINARY: name3 = "binary"; break;
16601 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16602 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16603 case RS6000_BTC_ABS: name3 = "abs"; break;
16604 case RS6000_BTC_DST: name3 = "dst"; break;
16608 fprintf (stderr,
16609 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16610 (name1) ? name1 : "---", fcode,
16611 (name2) ? name2 : "---", (int) icode,
16612 name3,
16613 func_valid_p ? "" : ", not valid");
16616 if (!func_valid_p)
16618 rs6000_invalid_builtin (fcode);
16620 /* Given that it is invalid, just generate a normal call. */
16621 return expand_call (exp, target, ignore);
16624 switch (fcode)
16626 case RS6000_BUILTIN_RECIP:
16627 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16629 case RS6000_BUILTIN_RECIPF:
16630 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16632 case RS6000_BUILTIN_RSQRTF:
16633 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16635 case RS6000_BUILTIN_RSQRT:
16636 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16638 case POWER7_BUILTIN_BPERMD:
16639 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16640 ? CODE_FOR_bpermd_di
16641 : CODE_FOR_bpermd_si), exp, target);
16643 case RS6000_BUILTIN_GET_TB:
16644 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16645 target);
16647 case RS6000_BUILTIN_MFTB:
16648 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16649 ? CODE_FOR_rs6000_mftb_di
16650 : CODE_FOR_rs6000_mftb_si),
16651 target);
16653 case RS6000_BUILTIN_MFFS:
16654 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16656 case RS6000_BUILTIN_MTFSF:
16657 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16659 case RS6000_BUILTIN_CPU_INIT:
16660 case RS6000_BUILTIN_CPU_IS:
16661 case RS6000_BUILTIN_CPU_SUPPORTS:
16662 return cpu_expand_builtin (fcode, exp, target);
16664 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16665 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16667 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16668 : (int) CODE_FOR_altivec_lvsl_direct);
16669 machine_mode tmode = insn_data[icode].operand[0].mode;
16670 machine_mode mode = insn_data[icode].operand[1].mode;
16671 tree arg;
16672 rtx op, addr, pat;
16674 gcc_assert (TARGET_ALTIVEC);
16676 arg = CALL_EXPR_ARG (exp, 0);
16677 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16678 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16679 addr = memory_address (mode, op);
16680 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16681 op = addr;
16682 else
16684 /* For the load case we need to negate the address. */
16685 op = gen_reg_rtx (GET_MODE (addr));
16686 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16688 op = gen_rtx_MEM (mode, op);
16690 if (target == 0
16691 || GET_MODE (target) != tmode
16692 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16693 target = gen_reg_rtx (tmode);
16695 pat = GEN_FCN (icode) (target, op);
16696 if (!pat)
16697 return 0;
16698 emit_insn (pat);
16700 return target;
16703 case ALTIVEC_BUILTIN_VCFUX:
16704 case ALTIVEC_BUILTIN_VCFSX:
16705 case ALTIVEC_BUILTIN_VCTUXS:
16706 case ALTIVEC_BUILTIN_VCTSXS:
16707 /* FIXME: There's got to be a nicer way to handle this case than
16708 constructing a new CALL_EXPR. */
16709 if (call_expr_nargs (exp) == 1)
16711 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16712 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16714 break;
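/* For example, the one-operand form __builtin_altivec_vcfsx (v) is
   rebuilt here as __builtin_altivec_vcfsx (v, 0), i.e. a conversion
   with a scale exponent of zero (a plain int-to-float conversion), so
   the expanders below only ever see the two-operand shape.  */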
16716 default:
16717 break;
16720 if (TARGET_ALTIVEC)
16722 ret = altivec_expand_builtin (exp, target, &success);
16724 if (success)
16725 return ret;
16727 if (TARGET_PAIRED_FLOAT)
16729 ret = paired_expand_builtin (exp, target, &success);
16731 if (success)
16732 return ret;
16734 if (TARGET_HTM)
16736 ret = htm_expand_builtin (exp, target, &success);
16738 if (success)
16739 return ret;
16742 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16743 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16744 gcc_assert (attr == RS6000_BTC_UNARY
16745 || attr == RS6000_BTC_BINARY
16746 || attr == RS6000_BTC_TERNARY
16747 || attr == RS6000_BTC_SPECIAL);
16749 /* Handle simple unary operations. */
16750 d = bdesc_1arg;
16751 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16752 if (d->code == fcode)
16753 return rs6000_expand_unop_builtin (d->icode, exp, target);
16755 /* Handle simple binary operations. */
16756 d = bdesc_2arg;
16757 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16758 if (d->code == fcode)
16759 return rs6000_expand_binop_builtin (d->icode, exp, target);
16761 /* Handle simple ternary operations. */
16762 d = bdesc_3arg;
16763 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16764 if (d->code == fcode)
16765 return rs6000_expand_ternop_builtin (d->icode, exp, target);
16767 /* Handle simple no-argument operations. */
16768 d = bdesc_0arg;
16769 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16770 if (d->code == fcode)
16771 return rs6000_expand_zeroop_builtin (d->icode, target);
16773 gcc_unreachable ();
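/* Dispatch sketch for the loops above: each bdesc_{0,1,2,3}arg table
   pairs an rs6000 builtin code with the insn_code of the pattern that
   implements it, so a simple binary builtin such as
   __builtin_altivec_vadduwm is expanded by locating its bdesc_2arg
   entry and handing the matching icode to
   rs6000_expand_binop_builtin.  */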
16776 /* Create a builtin vector type with a name, taking care not to give
16777 the canonical type a name. */
16779 static tree
16780 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16782 tree result = build_vector_type (elt_type, num_elts);
16784 /* Copy so we don't give the canonical type a name. */
16785 result = build_variant_type_copy (result);
16787 add_builtin_type (name, result);
16789 return result;
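/* A usage sketch:

     tree t = rs6000_vector_type ("__vector float", float_type_node, 4);

   returns a distinct named variant.  Copying first matters because
   the canonical type from build_vector_type is shared with ordinary
   GNU vector types, which should not pick up the AltiVec name in
   diagnostics.  */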
16792 static void
16793 rs6000_init_builtins (void)
16795 tree tdecl;
16796 tree ftype;
16797 machine_mode mode;
16799 if (TARGET_DEBUG_BUILTIN)
16800 fprintf (stderr, "rs6000_init_builtins%s%s%s\n",
16801 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
16802 (TARGET_ALTIVEC) ? ", altivec" : "",
16803 (TARGET_VSX) ? ", vsx" : "");
16805 V2SI_type_node = build_vector_type (intSI_type_node, 2);
16806 V2SF_type_node = build_vector_type (float_type_node, 2);
16807 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16808 : "__vector long long",
16809 intDI_type_node, 2);
16810 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16811 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16812 intSI_type_node, 4);
16813 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16814 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16815 intHI_type_node, 8);
16816 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16817 intQI_type_node, 16);
16819 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16820 unsigned_intQI_type_node, 16);
16821 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16822 unsigned_intHI_type_node, 8);
16823 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16824 unsigned_intSI_type_node, 4);
16825 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16826 ? "__vector unsigned long"
16827 : "__vector unsigned long long",
16828 unsigned_intDI_type_node, 2);
16830 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
16831 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
16832 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
16833 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16835 const_str_type_node
16836 = build_pointer_type (build_qualified_type (char_type_node,
16837 TYPE_QUAL_CONST));
16839 /* We use V1TI mode as a special container to hold __int128_t items that
16840 must live in VSX registers. */
16841 if (intTI_type_node)
16843 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16844 intTI_type_node, 1);
16845 unsigned_V1TI_type_node
16846 = rs6000_vector_type ("__vector unsigned __int128",
16847 unsigned_intTI_type_node, 1);
16850 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16851 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16852 'vector unsigned short'. */
16854 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16855 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16856 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16857 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16858 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16860 long_integer_type_internal_node = long_integer_type_node;
16861 long_unsigned_type_internal_node = long_unsigned_type_node;
16862 long_long_integer_type_internal_node = long_long_integer_type_node;
16863 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16864 intQI_type_internal_node = intQI_type_node;
16865 uintQI_type_internal_node = unsigned_intQI_type_node;
16866 intHI_type_internal_node = intHI_type_node;
16867 uintHI_type_internal_node = unsigned_intHI_type_node;
16868 intSI_type_internal_node = intSI_type_node;
16869 uintSI_type_internal_node = unsigned_intSI_type_node;
16870 intDI_type_internal_node = intDI_type_node;
16871 uintDI_type_internal_node = unsigned_intDI_type_node;
16872 intTI_type_internal_node = intTI_type_node;
16873 uintTI_type_internal_node = unsigned_intTI_type_node;
16874 float_type_internal_node = float_type_node;
16875 double_type_internal_node = double_type_node;
16876 long_double_type_internal_node = long_double_type_node;
16877 dfloat64_type_internal_node = dfloat64_type_node;
16878 dfloat128_type_internal_node = dfloat128_type_node;
16879 void_type_internal_node = void_type_node;
16881 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16882 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16883 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16884 format that uses a pair of doubles, depending on the switches and
16885 defaults.
16887 If we support neither 128-bit IBM double-double nor IEEE 128-bit
16888 floating point, we still need to make sure the type is non-zero, or
16889 else the self-test fails during bootstrap.
16891 We don't register a built-in type for __ibm128 if that type is the
16892 same as long double. Instead, rs6000_cpu_cpp_builtins adds a #define
16893 mapping __ibm128 to long double.
16895 For IEEE 128-bit floating point, always create the type __ieee128. If the
16896 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16897 __ieee128. */
16898 if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
16900 ibm128_float_type_node = make_node (REAL_TYPE);
16901 TYPE_PRECISION (ibm128_float_type_node) = 128;
16902 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16903 layout_type (ibm128_float_type_node);
16905 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16906 "__ibm128");
16908 else
16909 ibm128_float_type_node = long_double_type_node;
16911 if (TARGET_FLOAT128_TYPE)
16913 ieee128_float_type_node = float128_type_node;
16914 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16915 "__ieee128");
16918 else
16919 ieee128_float_type_node = long_double_type_node;
16921 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16922 tree type node. */
16923 builtin_mode_to_type[QImode][0] = integer_type_node;
16924 builtin_mode_to_type[HImode][0] = integer_type_node;
16925 builtin_mode_to_type[SImode][0] = intSI_type_node;
16926 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16927 builtin_mode_to_type[DImode][0] = intDI_type_node;
16928 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16929 builtin_mode_to_type[TImode][0] = intTI_type_node;
16930 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16931 builtin_mode_to_type[SFmode][0] = float_type_node;
16932 builtin_mode_to_type[DFmode][0] = double_type_node;
16933 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16934 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16935 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16936 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16937 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16938 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16939 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16940 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
16941 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
16942 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16943 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16944 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16945 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16946 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16947 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16948 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16949 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16950 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16951 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
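/* This table lets builtin_function_type map a (machine mode,
   unsignedness) pair back to a tree type; index [0] is the signed or
   only variant and [1] the unsigned one where it exists.  For
   example:

     tree t = builtin_mode_to_type[V4SImode][1];

   yields unsigned_V4SI_type_node, i.e. "__vector unsigned int".  */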
16953 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16954 TYPE_NAME (bool_char_type_node) = tdecl;
16956 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16957 TYPE_NAME (bool_short_type_node) = tdecl;
16959 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16960 TYPE_NAME (bool_int_type_node) = tdecl;
16962 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16963 TYPE_NAME (pixel_type_node) = tdecl;
16965 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16966 bool_char_type_node, 16);
16967 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16968 bool_short_type_node, 8);
16969 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16970 bool_int_type_node, 4);
16971 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16972 ? "__vector __bool long"
16973 : "__vector __bool long long",
16974 bool_long_type_node, 2);
16975 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16976 pixel_type_node, 8);
16978 /* Paired built-in functions are only available if the compiler was
16979 configured with the appropriate options, so create them only when
16980 those options are in effect. Create AltiVec and VSX builtins on
16981 machines with at least the general purpose extensions (970 and
16982 newer) to allow the use of the target attribute. */
16983 if (TARGET_PAIRED_FLOAT)
16984 paired_init_builtins ();
16985 if (TARGET_EXTRA_BUILTINS)
16986 altivec_init_builtins ();
16987 if (TARGET_HTM)
16988 htm_init_builtins ();
16990 if (TARGET_EXTRA_BUILTINS || TARGET_PAIRED_FLOAT)
16991 rs6000_common_init_builtins ();
16993 ftype = build_function_type_list (ieee128_float_type_node,
16994 const_str_type_node, NULL_TREE);
16995 def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
16996 def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);
16998 ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
16999 def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
17000 def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);
17002 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
17003 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
17004 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
17006 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
17007 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
17008 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
17010 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
17011 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
17012 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
17014 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
17015 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
17016 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
17018 mode = (TARGET_64BIT) ? DImode : SImode;
17019 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
17020 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
17021 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
17023 ftype = build_function_type_list (unsigned_intDI_type_node,
17024 NULL_TREE);
17025 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
17027 if (TARGET_64BIT)
17028 ftype = build_function_type_list (unsigned_intDI_type_node,
17029 NULL_TREE);
17030 else
17031 ftype = build_function_type_list (unsigned_intSI_type_node,
17032 NULL_TREE);
17033 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
17035 ftype = build_function_type_list (double_type_node, NULL_TREE);
17036 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
17038 ftype = build_function_type_list (void_type_node,
17039 intSI_type_node, double_type_node,
17040 NULL_TREE);
17041 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
17043 ftype = build_function_type_list (void_type_node, NULL_TREE);
17044 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
17046 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
17047 NULL_TREE);
17048 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
17049 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
17051 /* AIX libm provides clog as __clog. */
17052 if (TARGET_XCOFF
17053 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
17054 set_user_assembler_name (tdecl, "__clog");
17056 #ifdef SUBTARGET_INIT_BUILTINS
17057 SUBTARGET_INIT_BUILTINS;
17058 #endif
17061 /* Returns the rs6000 builtin decl for CODE. */
17063 static tree
17064 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
17066 HOST_WIDE_INT fnmask;
17068 if (code >= RS6000_BUILTIN_COUNT)
17069 return error_mark_node;
17071 fnmask = rs6000_builtin_info[code].mask;
17072 if ((fnmask & rs6000_builtin_mask) != fnmask)
17074 rs6000_invalid_builtin ((enum rs6000_builtins)code);
17075 return error_mark_node;
17078 return rs6000_builtin_decls[code];
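/* A usage sketch: the middle end reaches this function through the
   targetm.builtin_decl hook to map a function code back to its decl,
   e.g.

     tree decl = rs6000_builtin_decl (ALTIVEC_BUILTIN_VADDUWM, true);

   and both out-of-range codes and builtins whose required ISA mask is
   not enabled come back as error_mark_node.  */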
17081 static void
17082 paired_init_builtins (void)
17084 const struct builtin_description *d;
17085 size_t i;
17086 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17088 tree int_ftype_int_v2sf_v2sf
17089 = build_function_type_list (integer_type_node,
17090 integer_type_node,
17091 V2SF_type_node,
17092 V2SF_type_node,
17093 NULL_TREE);
17094 tree pcfloat_type_node =
17095 build_pointer_type (build_qualified_type
17096 (float_type_node, TYPE_QUAL_CONST));
17098 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
17099 long_integer_type_node,
17100 pcfloat_type_node,
17101 NULL_TREE);
17102 tree void_ftype_v2sf_long_pcfloat =
17103 build_function_type_list (void_type_node,
17104 V2SF_type_node,
17105 long_integer_type_node,
17106 pcfloat_type_node,
17107 NULL_TREE);
17110 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
17111 PAIRED_BUILTIN_LX);
17114 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
17115 PAIRED_BUILTIN_STX);
17117 /* Predicates. */
17118 d = bdesc_paired_preds;
17119 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
17121 tree type;
17122 HOST_WIDE_INT mask = d->mask;
17124 if ((mask & builtin_mask) != mask)
17126 if (TARGET_DEBUG_BUILTIN)
17127 fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
17128 d->name);
17129 continue;
17132 /* Cannot define builtin if the instruction is disabled. */
17133 gcc_assert (d->icode != CODE_FOR_nothing);
17135 if (TARGET_DEBUG_BUILTIN)
17136 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
17137 (int)i, get_insn_name (d->icode), (int)d->icode,
17138 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
17140 switch (insn_data[d->icode].operand[1].mode)
17142 case E_V2SFmode:
17143 type = int_ftype_int_v2sf_v2sf;
17144 break;
17145 default:
17146 gcc_unreachable ();
17149 def_builtin (d->name, type, d->code);
17153 static void
17154 altivec_init_builtins (void)
17156 const struct builtin_description *d;
17157 size_t i;
17158 tree ftype;
17159 tree decl;
17160 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17162 tree pvoid_type_node = build_pointer_type (void_type_node);
17164 tree pcvoid_type_node
17165 = build_pointer_type (build_qualified_type (void_type_node,
17166 TYPE_QUAL_CONST));
17168 tree int_ftype_opaque
17169 = build_function_type_list (integer_type_node,
17170 opaque_V4SI_type_node, NULL_TREE);
17171 tree opaque_ftype_opaque
17172 = build_function_type_list (integer_type_node, NULL_TREE);
17173 tree opaque_ftype_opaque_int
17174 = build_function_type_list (opaque_V4SI_type_node,
17175 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
17176 tree opaque_ftype_opaque_opaque_int
17177 = build_function_type_list (opaque_V4SI_type_node,
17178 opaque_V4SI_type_node, opaque_V4SI_type_node,
17179 integer_type_node, NULL_TREE);
17180 tree opaque_ftype_opaque_opaque_opaque
17181 = build_function_type_list (opaque_V4SI_type_node,
17182 opaque_V4SI_type_node, opaque_V4SI_type_node,
17183 opaque_V4SI_type_node, NULL_TREE);
17184 tree opaque_ftype_opaque_opaque
17185 = build_function_type_list (opaque_V4SI_type_node,
17186 opaque_V4SI_type_node, opaque_V4SI_type_node,
17187 NULL_TREE);
17188 tree int_ftype_int_opaque_opaque
17189 = build_function_type_list (integer_type_node,
17190 integer_type_node, opaque_V4SI_type_node,
17191 opaque_V4SI_type_node, NULL_TREE);
17192 tree int_ftype_int_v4si_v4si
17193 = build_function_type_list (integer_type_node,
17194 integer_type_node, V4SI_type_node,
17195 V4SI_type_node, NULL_TREE);
17196 tree int_ftype_int_v2di_v2di
17197 = build_function_type_list (integer_type_node,
17198 integer_type_node, V2DI_type_node,
17199 V2DI_type_node, NULL_TREE);
17200 tree void_ftype_v4si
17201 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
17202 tree v8hi_ftype_void
17203 = build_function_type_list (V8HI_type_node, NULL_TREE);
17204 tree void_ftype_void
17205 = build_function_type_list (void_type_node, NULL_TREE);
17206 tree void_ftype_int
17207 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
17209 tree opaque_ftype_long_pcvoid
17210 = build_function_type_list (opaque_V4SI_type_node,
17211 long_integer_type_node, pcvoid_type_node,
17212 NULL_TREE);
17213 tree v16qi_ftype_long_pcvoid
17214 = build_function_type_list (V16QI_type_node,
17215 long_integer_type_node, pcvoid_type_node,
17216 NULL_TREE);
17217 tree v8hi_ftype_long_pcvoid
17218 = build_function_type_list (V8HI_type_node,
17219 long_integer_type_node, pcvoid_type_node,
17220 NULL_TREE);
17221 tree v4si_ftype_long_pcvoid
17222 = build_function_type_list (V4SI_type_node,
17223 long_integer_type_node, pcvoid_type_node,
17224 NULL_TREE);
17225 tree v4sf_ftype_long_pcvoid
17226 = build_function_type_list (V4SF_type_node,
17227 long_integer_type_node, pcvoid_type_node,
17228 NULL_TREE);
17229 tree v2df_ftype_long_pcvoid
17230 = build_function_type_list (V2DF_type_node,
17231 long_integer_type_node, pcvoid_type_node,
17232 NULL_TREE);
17233 tree v2di_ftype_long_pcvoid
17234 = build_function_type_list (V2DI_type_node,
17235 long_integer_type_node, pcvoid_type_node,
17236 NULL_TREE);
17238 tree void_ftype_opaque_long_pvoid
17239 = build_function_type_list (void_type_node,
17240 opaque_V4SI_type_node, long_integer_type_node,
17241 pvoid_type_node, NULL_TREE);
17242 tree void_ftype_v4si_long_pvoid
17243 = build_function_type_list (void_type_node,
17244 V4SI_type_node, long_integer_type_node,
17245 pvoid_type_node, NULL_TREE);
17246 tree void_ftype_v16qi_long_pvoid
17247 = build_function_type_list (void_type_node,
17248 V16QI_type_node, long_integer_type_node,
17249 pvoid_type_node, NULL_TREE);
17251 tree void_ftype_v16qi_pvoid_long
17252 = build_function_type_list (void_type_node,
17253 V16QI_type_node, pvoid_type_node,
17254 long_integer_type_node, NULL_TREE);
17256 tree void_ftype_v8hi_long_pvoid
17257 = build_function_type_list (void_type_node,
17258 V8HI_type_node, long_integer_type_node,
17259 pvoid_type_node, NULL_TREE);
17260 tree void_ftype_v4sf_long_pvoid
17261 = build_function_type_list (void_type_node,
17262 V4SF_type_node, long_integer_type_node,
17263 pvoid_type_node, NULL_TREE);
17264 tree void_ftype_v2df_long_pvoid
17265 = build_function_type_list (void_type_node,
17266 V2DF_type_node, long_integer_type_node,
17267 pvoid_type_node, NULL_TREE);
17268 tree void_ftype_v2di_long_pvoid
17269 = build_function_type_list (void_type_node,
17270 V2DI_type_node, long_integer_type_node,
17271 pvoid_type_node, NULL_TREE);
17272 tree int_ftype_int_v8hi_v8hi
17273 = build_function_type_list (integer_type_node,
17274 integer_type_node, V8HI_type_node,
17275 V8HI_type_node, NULL_TREE);
17276 tree int_ftype_int_v16qi_v16qi
17277 = build_function_type_list (integer_type_node,
17278 integer_type_node, V16QI_type_node,
17279 V16QI_type_node, NULL_TREE);
17280 tree int_ftype_int_v4sf_v4sf
17281 = build_function_type_list (integer_type_node,
17282 integer_type_node, V4SF_type_node,
17283 V4SF_type_node, NULL_TREE);
17284 tree int_ftype_int_v2df_v2df
17285 = build_function_type_list (integer_type_node,
17286 integer_type_node, V2DF_type_node,
17287 V2DF_type_node, NULL_TREE);
17288 tree v2di_ftype_v2di
17289 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17290 tree v4si_ftype_v4si
17291 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17292 tree v8hi_ftype_v8hi
17293 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17294 tree v16qi_ftype_v16qi
17295 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17296 tree v4sf_ftype_v4sf
17297 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17298 tree v2df_ftype_v2df
17299 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17300 tree void_ftype_pcvoid_int_int
17301 = build_function_type_list (void_type_node,
17302 pcvoid_type_node, integer_type_node,
17303 integer_type_node, NULL_TREE);
17305 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17306 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17307 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17308 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17309 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17310 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17311 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17312 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17313 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17314 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17315 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17316 ALTIVEC_BUILTIN_LVXL_V2DF);
17317 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17318 ALTIVEC_BUILTIN_LVXL_V2DI);
17319 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17320 ALTIVEC_BUILTIN_LVXL_V4SF);
17321 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17322 ALTIVEC_BUILTIN_LVXL_V4SI);
17323 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17324 ALTIVEC_BUILTIN_LVXL_V8HI);
17325 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17326 ALTIVEC_BUILTIN_LVXL_V16QI);
17327 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17328 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17329 ALTIVEC_BUILTIN_LVX_V2DF);
17330 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17331 ALTIVEC_BUILTIN_LVX_V2DI);
17332 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17333 ALTIVEC_BUILTIN_LVX_V4SF);
17334 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17335 ALTIVEC_BUILTIN_LVX_V4SI);
17336 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17337 ALTIVEC_BUILTIN_LVX_V8HI);
17338 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17339 ALTIVEC_BUILTIN_LVX_V16QI);
17340 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17341 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17342 ALTIVEC_BUILTIN_STVX_V2DF);
17343 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17344 ALTIVEC_BUILTIN_STVX_V2DI);
17345 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17346 ALTIVEC_BUILTIN_STVX_V4SF);
17347 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17348 ALTIVEC_BUILTIN_STVX_V4SI);
17349 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17350 ALTIVEC_BUILTIN_STVX_V8HI);
17351 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17352 ALTIVEC_BUILTIN_STVX_V16QI);
17353 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17354 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17355 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17356 ALTIVEC_BUILTIN_STVXL_V2DF);
17357 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17358 ALTIVEC_BUILTIN_STVXL_V2DI);
17359 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17360 ALTIVEC_BUILTIN_STVXL_V4SF);
17361 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17362 ALTIVEC_BUILTIN_STVXL_V4SI);
17363 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17364 ALTIVEC_BUILTIN_STVXL_V8HI);
17365 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17366 ALTIVEC_BUILTIN_STVXL_V16QI);
17367 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17368 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17369 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17370 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17371 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17372 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17373 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17374 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17375 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17376 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17377 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17378 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17379 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17380 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17381 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17382 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17384 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17385 VSX_BUILTIN_LXVD2X_V2DF);
17386 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17387 VSX_BUILTIN_LXVD2X_V2DI);
17388 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17389 VSX_BUILTIN_LXVW4X_V4SF);
17390 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17391 VSX_BUILTIN_LXVW4X_V4SI);
17392 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17393 VSX_BUILTIN_LXVW4X_V8HI);
17394 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17395 VSX_BUILTIN_LXVW4X_V16QI);
17396 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17397 VSX_BUILTIN_STXVD2X_V2DF);
17398 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17399 VSX_BUILTIN_STXVD2X_V2DI);
17400 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17401 VSX_BUILTIN_STXVW4X_V4SF);
17402 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17403 VSX_BUILTIN_STXVW4X_V4SI);
17404 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17405 VSX_BUILTIN_STXVW4X_V8HI);
17406 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17407 VSX_BUILTIN_STXVW4X_V16QI);
17409 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17410 VSX_BUILTIN_LD_ELEMREV_V2DF);
17411 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17412 VSX_BUILTIN_LD_ELEMREV_V2DI);
17413 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17414 VSX_BUILTIN_LD_ELEMREV_V4SF);
17415 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17416 VSX_BUILTIN_LD_ELEMREV_V4SI);
17417 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17418 VSX_BUILTIN_ST_ELEMREV_V2DF);
17419 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17420 VSX_BUILTIN_ST_ELEMREV_V2DI);
17421 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17422 VSX_BUILTIN_ST_ELEMREV_V4SF);
17423 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17424 VSX_BUILTIN_ST_ELEMREV_V4SI);
17426 def_builtin ("__builtin_vsx_le_be_v8hi", v8hi_ftype_long_pcvoid,
17427 VSX_BUILTIN_XL_BE_V8HI);
17428 def_builtin ("__builtin_vsx_le_be_v4si", v4si_ftype_long_pcvoid,
17429 VSX_BUILTIN_XL_BE_V4SI);
17430 def_builtin ("__builtin_vsx_le_be_v2di", v2di_ftype_long_pcvoid,
17431 VSX_BUILTIN_XL_BE_V2DI);
17432 def_builtin ("__builtin_vsx_le_be_v4sf", v4sf_ftype_long_pcvoid,
17433 VSX_BUILTIN_XL_BE_V4SF);
17434 def_builtin ("__builtin_vsx_le_be_v2df", v2df_ftype_long_pcvoid,
17435 VSX_BUILTIN_XL_BE_V2DF);
17436 def_builtin ("__builtin_vsx_le_be_v16qi", v16qi_ftype_long_pcvoid,
17437 VSX_BUILTIN_XL_BE_V16QI);
17439 if (TARGET_P9_VECTOR)
17441 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17442 VSX_BUILTIN_LD_ELEMREV_V8HI);
17443 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17444 VSX_BUILTIN_LD_ELEMREV_V16QI);
17445 def_builtin ("__builtin_vsx_st_elemrev_v8hi",
17446 void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
17447 def_builtin ("__builtin_vsx_st_elemrev_v16qi",
17448 void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
17450 else
17452 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V8HI]
17453 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V8HI];
17454 rs6000_builtin_decls[(int) VSX_BUILTIN_LD_ELEMREV_V16QI]
17455 = rs6000_builtin_decls[(int) VSX_BUILTIN_LXVW4X_V16QI];
17456 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V8HI]
17457 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V8HI];
17458 rs6000_builtin_decls[(int) VSX_BUILTIN_ST_ELEMREV_V16QI]
17459 = rs6000_builtin_decls[(int) VSX_BUILTIN_STXVW4X_V16QI];
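/* Pre-POWER9 there are no dedicated element-reversing patterns for
   the 16-bit and 8-bit element cases, so the decl slots above are
   simply filled with the corresponding lxvw4x/stxvw4x decls,
   presumably so that overload resolution still has a decl to return
   for these names on older ISAs.  */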
17462 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17463 VSX_BUILTIN_VEC_LD);
17464 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17465 VSX_BUILTIN_VEC_ST);
17466 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17467 VSX_BUILTIN_VEC_XL);
17468 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17469 VSX_BUILTIN_VEC_XL_BE);
17470 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17471 VSX_BUILTIN_VEC_XST);
17473 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17474 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17475 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17477 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17478 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17479 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17480 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17481 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17482 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17483 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17484 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17485 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17486 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17487 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17488 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17490 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17491 ALTIVEC_BUILTIN_VEC_ADDE);
17492 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17493 ALTIVEC_BUILTIN_VEC_ADDEC);
17494 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17495 ALTIVEC_BUILTIN_VEC_CMPNE);
17496 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17497 ALTIVEC_BUILTIN_VEC_MUL);
17498 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17499 ALTIVEC_BUILTIN_VEC_SUBE);
17500 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17501 ALTIVEC_BUILTIN_VEC_SUBEC);
17503 /* Cell builtins. */
17504 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17505 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17506 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17507 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17509 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17510 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17511 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17512 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17514 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17515 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17516 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17517 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17519 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17520 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17521 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17522 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17524 if (TARGET_P9_VECTOR)
17525 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17526 P9V_BUILTIN_STXVL);
17528 /* Add the DST variants. */
17529 d = bdesc_dst;
17530 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17532 HOST_WIDE_INT mask = d->mask;
17534 /* It is expected that these dst built-in functions may have
17535 d->icode equal to CODE_FOR_nothing. */
17536 if ((mask & builtin_mask) != mask)
17538 if (TARGET_DEBUG_BUILTIN)
17539 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17540 d->name);
17541 continue;
17543 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17546 /* Initialize the predicates. */
17547 d = bdesc_altivec_preds;
17548 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17550 machine_mode mode1;
17551 tree type;
17552 HOST_WIDE_INT mask = d->mask;
17554 if ((mask & builtin_mask) != mask)
17556 if (TARGET_DEBUG_BUILTIN)
17557 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17558 d->name);
17559 continue;
17562 if (rs6000_overloaded_builtin_p (d->code))
17563 mode1 = VOIDmode;
17564 else
17566 /* Cannot define builtin if the instruction is disabled. */
17567 gcc_assert (d->icode != CODE_FOR_nothing);
17568 mode1 = insn_data[d->icode].operand[1].mode;
17571 switch (mode1)
17573 case E_VOIDmode:
17574 type = int_ftype_int_opaque_opaque;
17575 break;
17576 case E_V2DImode:
17577 type = int_ftype_int_v2di_v2di;
17578 break;
17579 case E_V4SImode:
17580 type = int_ftype_int_v4si_v4si;
17581 break;
17582 case E_V8HImode:
17583 type = int_ftype_int_v8hi_v8hi;
17584 break;
17585 case E_V16QImode:
17586 type = int_ftype_int_v16qi_v16qi;
17587 break;
17588 case E_V4SFmode:
17589 type = int_ftype_int_v4sf_v4sf;
17590 break;
17591 case E_V2DFmode:
17592 type = int_ftype_int_v2df_v2df;
17593 break;
17594 default:
17595 gcc_unreachable ();
17598 def_builtin (d->name, type, d->code);
17601 /* Initialize the abs* operators. */
17602 d = bdesc_abs;
17603 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17605 machine_mode mode0;
17606 tree type;
17607 HOST_WIDE_INT mask = d->mask;
17609 if ((mask & builtin_mask) != mask)
17611 if (TARGET_DEBUG_BUILTIN)
17612 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17613 d->name);
17614 continue;
17617 /* Cannot define builtin if the instruction is disabled. */
17618 gcc_assert (d->icode != CODE_FOR_nothing);
17619 mode0 = insn_data[d->icode].operand[0].mode;
17621 switch (mode0)
17623 case E_V2DImode:
17624 type = v2di_ftype_v2di;
17625 break;
17626 case E_V4SImode:
17627 type = v4si_ftype_v4si;
17628 break;
17629 case E_V8HImode:
17630 type = v8hi_ftype_v8hi;
17631 break;
17632 case E_V16QImode:
17633 type = v16qi_ftype_v16qi;
17634 break;
17635 case E_V4SFmode:
17636 type = v4sf_ftype_v4sf;
17637 break;
17638 case E_V2DFmode:
17639 type = v2df_ftype_v2df;
17640 break;
17641 default:
17642 gcc_unreachable ();
17645 def_builtin (d->name, type, d->code);
17648 /* Initialize target builtin that implements
17649 targetm.vectorize.builtin_mask_for_load. */
17651 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17652 v16qi_ftype_long_pcvoid,
17653 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17654 BUILT_IN_MD, NULL, NULL_TREE);
17655 TREE_READONLY (decl) = 1;
17656 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17657 altivec_builtin_mask_for_load = decl;
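/* Setting TREE_READONLY above marks the decl as "const" in the
   function-attribute sense: the result depends only on the argument
   and there are no side effects, which allows the mask computation to
   be CSEd and hoisted out of vectorized loops.  */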
17659 /* Access to the vec_init patterns. */
17660 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17661 integer_type_node, integer_type_node,
17662 integer_type_node, NULL_TREE);
17663 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17665 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17666 short_integer_type_node,
17667 short_integer_type_node,
17668 short_integer_type_node,
17669 short_integer_type_node,
17670 short_integer_type_node,
17671 short_integer_type_node,
17672 short_integer_type_node, NULL_TREE);
17673 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17675 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17676 char_type_node, char_type_node,
17677 char_type_node, char_type_node,
17678 char_type_node, char_type_node,
17679 char_type_node, char_type_node,
17680 char_type_node, char_type_node,
17681 char_type_node, char_type_node,
17682 char_type_node, char_type_node,
17683 char_type_node, NULL_TREE);
17684 def_builtin ("__builtin_vec_init_v16qi", ftype,
17685 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17687 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17688 float_type_node, float_type_node,
17689 float_type_node, NULL_TREE);
17690 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17692 /* VSX builtins. */
17693 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17694 double_type_node, NULL_TREE);
17695 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17697 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17698 intDI_type_node, NULL_TREE);
17699 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17701 /* Access to the vec_set patterns. */
17702 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17703 intSI_type_node,
17704 integer_type_node, NULL_TREE);
17705 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17707 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17708 intHI_type_node,
17709 integer_type_node, NULL_TREE);
17710 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17712 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17713 intQI_type_node,
17714 integer_type_node, NULL_TREE);
17715 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17717 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17718 float_type_node,
17719 integer_type_node, NULL_TREE);
17720 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17722 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17723 double_type_node,
17724 integer_type_node, NULL_TREE);
17725 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17727 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17728 intDI_type_node,
17729 integer_type_node, NULL_TREE);
17730 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17732 /* Access to the vec_extract patterns. */
17733 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17734 integer_type_node, NULL_TREE);
17735 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17737 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17738 integer_type_node, NULL_TREE);
17739 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17741 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17742 integer_type_node, NULL_TREE);
17743 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17745 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17746 integer_type_node, NULL_TREE);
17747 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17749 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17750 integer_type_node, NULL_TREE);
17751 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17753 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17754 integer_type_node, NULL_TREE);
17755 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17758 if (V1TI_type_node)
17760 tree v1ti_ftype_long_pcvoid
17761 = build_function_type_list (V1TI_type_node,
17762 long_integer_type_node, pcvoid_type_node,
17763 NULL_TREE);
17764 tree void_ftype_v1ti_long_pvoid
17765 = build_function_type_list (void_type_node,
17766 V1TI_type_node, long_integer_type_node,
17767 pvoid_type_node, NULL_TREE);
17768 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17769 VSX_BUILTIN_LXVD2X_V1TI);
17770 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17771 VSX_BUILTIN_STXVD2X_V1TI);
17772 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17773 NULL_TREE, NULL_TREE);
17774 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17775 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17776 intTI_type_node,
17777 integer_type_node, NULL_TREE);
17778 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17779 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17780 integer_type_node, NULL_TREE);
17781 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
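/* Editor's note: an illustrative sketch, not part of this file, showing how
   the vec_init/vec_set/vec_ext builtins registered above appear from user
   code compiled with a PowerPC compiler and -maltivec.  The function name is
   hypothetical, and element numbering depends on endianness.  */
#if 0
#include <altivec.h>

int
example_vec_init_set_ext (void)
{
  vector signed int x = __builtin_vec_init_v4si (1, 2, 3, 4);
  x = __builtin_vec_set_v4si (x, 42, 2);    /* Replace element 2.  */
  return __builtin_vec_ext_v4si (x, 2);     /* Yields 42.  */
}
#endif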
17786 static void
17787 htm_init_builtins (void)
17789 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17790 const struct builtin_description *d;
17791 size_t i;
17793 d = bdesc_htm;
17794 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17796 tree op[MAX_HTM_OPERANDS], type;
17797 HOST_WIDE_INT mask = d->mask;
17798 unsigned attr = rs6000_builtin_info[d->code].attr;
17799 bool void_func = (attr & RS6000_BTC_VOID);
17800 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17801 int nopnds = 0;
17802 tree gpr_type_node;
17803 tree rettype;
17804 tree argtype;
17806 /* It is expected that these htm built-in functions may have
17807 d->icode equal to CODE_FOR_nothing. */
17809 if (TARGET_32BIT && TARGET_POWERPC64)
17810 gpr_type_node = long_long_unsigned_type_node;
17811 else
17812 gpr_type_node = long_unsigned_type_node;
17814 if (attr & RS6000_BTC_SPR)
17816 rettype = gpr_type_node;
17817 argtype = gpr_type_node;
17819 else if (d->code == HTM_BUILTIN_TABORTDC
17820 || d->code == HTM_BUILTIN_TABORTDCI)
17822 rettype = unsigned_type_node;
17823 argtype = gpr_type_node;
17825 else
17827 rettype = unsigned_type_node;
17828 argtype = unsigned_type_node;
17831 if ((mask & builtin_mask) != mask)
17833 if (TARGET_DEBUG_BUILTIN)
17834 fprintf (stderr, "htm_builtin, skip %s\n", d->name);
17835 continue;
17838 if (d->name == 0)
17840 if (TARGET_DEBUG_BUILTIN)
17841 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17842 (long unsigned) i);
17843 continue;
17846 op[nopnds++] = (void_func) ? void_type_node : rettype;
17848 if (attr_args == RS6000_BTC_UNARY)
17849 op[nopnds++] = argtype;
17850 else if (attr_args == RS6000_BTC_BINARY)
17852 op[nopnds++] = argtype;
17853 op[nopnds++] = argtype;
17855 else if (attr_args == RS6000_BTC_TERNARY)
17857 op[nopnds++] = argtype;
17858 op[nopnds++] = argtype;
17859 op[nopnds++] = argtype;
17862 switch (nopnds)
17864 case 1:
17865 type = build_function_type_list (op[0], NULL_TREE);
17866 break;
17867 case 2:
17868 type = build_function_type_list (op[0], op[1], NULL_TREE);
17869 break;
17870 case 3:
17871 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17872 break;
17873 case 4:
17874 type = build_function_type_list (op[0], op[1], op[2], op[3],
17875 NULL_TREE);
17876 break;
17877 default:
17878 gcc_unreachable ();
17881 def_builtin (d->name, type, d->code);
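/* Editor's note (worked example, not in the original): for an HTM builtin
   whose attr has both RS6000_BTC_VOID and RS6000_BTC_BINARY set, the code
   above collects op[0] = void_type_node and op[1] = op[2] = argtype, so
   nopnds == 3 and the switch builds the signature
   "void f (argtype, argtype)" via build_function_type_list.  */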
17885 /* Hash function for builtin functions with up to 3 arguments and a return
17886 type. */
17887 hashval_t
17888 builtin_hasher::hash (builtin_hash_struct *bh)
17890 unsigned ret = 0;
17891 int i;
17893 for (i = 0; i < 4; i++)
17895 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17896 ret = (ret * 2) + bh->uns_p[i];
17899 return ret;
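/* Editor's note (not in the original): each iteration above folds one
   (mode, uns_p) pair into the hash as a mixed-radix digit -- multiply by
   MAX_MACHINE_MODE, add the mode, double, add the signedness bit -- so
   distinct (mode[0..3], uns_p[0..3]) tuples map to distinct hash values,
   up to overflow of hashval_t.  */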
17902 /* Compare builtin hash entries H1 and H2 for equivalence. */
17903 bool
17904 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17906 return ((p1->mode[0] == p2->mode[0])
17907 && (p1->mode[1] == p2->mode[1])
17908 && (p1->mode[2] == p2->mode[2])
17909 && (p1->mode[3] == p2->mode[3])
17910 && (p1->uns_p[0] == p2->uns_p[0])
17911 && (p1->uns_p[1] == p2->uns_p[1])
17912 && (p1->uns_p[2] == p2->uns_p[2])
17913 && (p1->uns_p[3] == p2->uns_p[3]));
17916 /* Map types for builtin functions with an explicit return type and up to 3
17917 arguments. Functions with fewer than 3 arguments use VOIDmode as the mode
17918 of the unused argument slots. */
17919 static tree
17920 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17921 machine_mode mode_arg1, machine_mode mode_arg2,
17922 enum rs6000_builtins builtin, const char *name)
17924 struct builtin_hash_struct h;
17925 struct builtin_hash_struct *h2;
17926 int num_args = 3;
17927 int i;
17928 tree ret_type = NULL_TREE;
17929 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17931 /* Create builtin_hash_table. */
17932 if (builtin_hash_table == NULL)
17933 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17935 h.type = NULL_TREE;
17936 h.mode[0] = mode_ret;
17937 h.mode[1] = mode_arg0;
17938 h.mode[2] = mode_arg1;
17939 h.mode[3] = mode_arg2;
17940 h.uns_p[0] = 0;
17941 h.uns_p[1] = 0;
17942 h.uns_p[2] = 0;
17943 h.uns_p[3] = 0;
17945 /* If the builtin produces unsigned results or takes unsigned arguments,
17946 and it is returned as a decl for the vectorizer (such as widening
17947 multiplies, permute), make sure the arguments and return value are
17948 type correct. */
17949 switch (builtin)
17951 /* unsigned 1 argument functions. */
17952 case CRYPTO_BUILTIN_VSBOX:
17953 case P8V_BUILTIN_VGBBD:
17954 case MISC_BUILTIN_CDTBCD:
17955 case MISC_BUILTIN_CBCDTD:
17956 h.uns_p[0] = 1;
17957 h.uns_p[1] = 1;
17958 break;
17960 /* unsigned 2 argument functions. */
17961 case ALTIVEC_BUILTIN_VMULEUB:
17962 case ALTIVEC_BUILTIN_VMULEUH:
17963 case ALTIVEC_BUILTIN_VMULEUW:
17964 case ALTIVEC_BUILTIN_VMULOUB:
17965 case ALTIVEC_BUILTIN_VMULOUH:
17966 case ALTIVEC_BUILTIN_VMULOUW:
17967 case CRYPTO_BUILTIN_VCIPHER:
17968 case CRYPTO_BUILTIN_VCIPHERLAST:
17969 case CRYPTO_BUILTIN_VNCIPHER:
17970 case CRYPTO_BUILTIN_VNCIPHERLAST:
17971 case CRYPTO_BUILTIN_VPMSUMB:
17972 case CRYPTO_BUILTIN_VPMSUMH:
17973 case CRYPTO_BUILTIN_VPMSUMW:
17974 case CRYPTO_BUILTIN_VPMSUMD:
17975 case CRYPTO_BUILTIN_VPMSUM:
17976 case MISC_BUILTIN_ADDG6S:
17977 case MISC_BUILTIN_DIVWEU:
17978 case MISC_BUILTIN_DIVWEUO:
17979 case MISC_BUILTIN_DIVDEU:
17980 case MISC_BUILTIN_DIVDEUO:
17981 case VSX_BUILTIN_UDIV_V2DI:
17982 case ALTIVEC_BUILTIN_VMAXUB:
17983 case ALTIVEC_BUILTIN_VMINUB:
17984 case ALTIVEC_BUILTIN_VMAXUH:
17985 case ALTIVEC_BUILTIN_VMINUH:
17986 case ALTIVEC_BUILTIN_VMAXUW:
17987 case ALTIVEC_BUILTIN_VMINUW:
17988 case P8V_BUILTIN_VMAXUD:
17989 case P8V_BUILTIN_VMINUD:
17990 h.uns_p[0] = 1;
17991 h.uns_p[1] = 1;
17992 h.uns_p[2] = 1;
17993 break;
17995 /* unsigned 3 argument functions. */
17996 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17997 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17998 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17999 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
18000 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
18001 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
18002 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
18003 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
18004 case VSX_BUILTIN_VPERM_16QI_UNS:
18005 case VSX_BUILTIN_VPERM_8HI_UNS:
18006 case VSX_BUILTIN_VPERM_4SI_UNS:
18007 case VSX_BUILTIN_VPERM_2DI_UNS:
18008 case VSX_BUILTIN_XXSEL_16QI_UNS:
18009 case VSX_BUILTIN_XXSEL_8HI_UNS:
18010 case VSX_BUILTIN_XXSEL_4SI_UNS:
18011 case VSX_BUILTIN_XXSEL_2DI_UNS:
18012 case CRYPTO_BUILTIN_VPERMXOR:
18013 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
18014 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
18015 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
18016 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
18017 case CRYPTO_BUILTIN_VSHASIGMAW:
18018 case CRYPTO_BUILTIN_VSHASIGMAD:
18019 case CRYPTO_BUILTIN_VSHASIGMA:
18020 h.uns_p[0] = 1;
18021 h.uns_p[1] = 1;
18022 h.uns_p[2] = 1;
18023 h.uns_p[3] = 1;
18024 break;
18026 /* signed permute functions with unsigned char mask. */
18027 case ALTIVEC_BUILTIN_VPERM_16QI:
18028 case ALTIVEC_BUILTIN_VPERM_8HI:
18029 case ALTIVEC_BUILTIN_VPERM_4SI:
18030 case ALTIVEC_BUILTIN_VPERM_4SF:
18031 case ALTIVEC_BUILTIN_VPERM_2DI:
18032 case ALTIVEC_BUILTIN_VPERM_2DF:
18033 case VSX_BUILTIN_VPERM_16QI:
18034 case VSX_BUILTIN_VPERM_8HI:
18035 case VSX_BUILTIN_VPERM_4SI:
18036 case VSX_BUILTIN_VPERM_4SF:
18037 case VSX_BUILTIN_VPERM_2DI:
18038 case VSX_BUILTIN_VPERM_2DF:
18039 h.uns_p[3] = 1;
18040 break;
18042 /* unsigned args, signed return. */
18043 case VSX_BUILTIN_XVCVUXDSP:
18044 case VSX_BUILTIN_XVCVUXDDP_UNS:
18045 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
18046 h.uns_p[1] = 1;
18047 break;
18049 /* signed args, unsigned return. */
18050 case VSX_BUILTIN_XVCVDPUXDS_UNS:
18051 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
18052 case MISC_BUILTIN_UNPACK_TD:
18053 case MISC_BUILTIN_UNPACK_V1TI:
18054 h.uns_p[0] = 1;
18055 break;
18057 /* unsigned arguments for 128-bit pack instructions. */
18058 case MISC_BUILTIN_PACK_TD:
18059 case MISC_BUILTIN_PACK_V1TI:
18060 h.uns_p[1] = 1;
18061 h.uns_p[2] = 1;
18062 break;
18064 /* unsigned second arguments (vector shift right). */
18065 case ALTIVEC_BUILTIN_VSRB:
18066 case ALTIVEC_BUILTIN_VSRH:
18067 case ALTIVEC_BUILTIN_VSRW:
18068 case P8V_BUILTIN_VSRD:
18069 h.uns_p[2] = 1;
18070 break;
18072 default:
18073 break;
18076 /* Figure out how many args are present. */
18077 while (num_args > 0 && h.mode[num_args] == VOIDmode)
18078 num_args--;
18080 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
18081 if (!ret_type && h.uns_p[0])
18082 ret_type = builtin_mode_to_type[h.mode[0]][0];
18084 if (!ret_type)
18085 fatal_error (input_location,
18086 "internal error: builtin function %qs had an unexpected "
18087 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
18089 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
18090 arg_type[i] = NULL_TREE;
18092 for (i = 0; i < num_args; i++)
18094 int m = (int) h.mode[i+1];
18095 int uns_p = h.uns_p[i+1];
18097 arg_type[i] = builtin_mode_to_type[m][uns_p];
18098 if (!arg_type[i] && uns_p)
18099 arg_type[i] = builtin_mode_to_type[m][0];
18101 if (!arg_type[i])
18102 fatal_error (input_location,
18103 "internal error: builtin function %qs, argument %d "
18104 "had unexpected argument type %qs", name, i,
18105 GET_MODE_NAME (m));
18108 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
18109 if (*found == NULL)
18111 h2 = ggc_alloc<builtin_hash_struct> ();
18112 *h2 = h;
18113 *found = h2;
18115 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
18116 arg_type[2], NULL_TREE);
18119 return (*found)->type;
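/* Editor's note (worked example, not in the original): for
   ALTIVEC_BUILTIN_VMULEUB the caller passes the vmuleub insn modes,
   V8HImode (result) and V16QImode, V16QImode (arguments); the switch above
   marks uns_p[0..2], so the cached type is
   "vector unsigned short (vector unsigned char, vector unsigned char)".  */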
18122 static void
18123 rs6000_common_init_builtins (void)
18125 const struct builtin_description *d;
18126 size_t i;
18128 tree opaque_ftype_opaque = NULL_TREE;
18129 tree opaque_ftype_opaque_opaque = NULL_TREE;
18130 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
18131 tree v2si_ftype = NULL_TREE;
18132 tree v2si_ftype_qi = NULL_TREE;
18133 tree v2si_ftype_v2si_qi = NULL_TREE;
18134 tree v2si_ftype_int_qi = NULL_TREE;
18135 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18137 if (TARGET_PAIRED_FLOAT)
18139 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
18140 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
18143 /* Paired builtins are only available if the compiler was built with the
18144 appropriate options, so only create those builtins when the corresponding
18145 compiler option is enabled. Create Altivec and VSX builtins on machines
18146 with at least the general purpose extensions (970 and newer) to allow the
18147 use of the target attribute. */
18149 if (TARGET_EXTRA_BUILTINS)
18150 builtin_mask |= RS6000_BTM_COMMON;
18152 /* Add the ternary operators. */
18153 d = bdesc_3arg;
18154 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
18156 tree type;
18157 HOST_WIDE_INT mask = d->mask;
18159 if ((mask & builtin_mask) != mask)
18161 if (TARGET_DEBUG_BUILTIN)
18162 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
18163 continue;
18166 if (rs6000_overloaded_builtin_p (d->code))
18168 if (! (type = opaque_ftype_opaque_opaque_opaque))
18169 type = opaque_ftype_opaque_opaque_opaque
18170 = build_function_type_list (opaque_V4SI_type_node,
18171 opaque_V4SI_type_node,
18172 opaque_V4SI_type_node,
18173 opaque_V4SI_type_node,
18174 NULL_TREE);
18176 else
18178 enum insn_code icode = d->icode;
18179 if (d->name == 0)
18181 if (TARGET_DEBUG_BUILTIN)
18182 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
18183 (long unsigned)i);
18185 continue;
18188 if (icode == CODE_FOR_nothing)
18190 if (TARGET_DEBUG_BUILTIN)
18191 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
18192 d->name);
18194 continue;
18197 type = builtin_function_type (insn_data[icode].operand[0].mode,
18198 insn_data[icode].operand[1].mode,
18199 insn_data[icode].operand[2].mode,
18200 insn_data[icode].operand[3].mode,
18201 d->code, d->name);
18204 def_builtin (d->name, type, d->code);
18207 /* Add the binary operators. */
18208 d = bdesc_2arg;
18209 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
18211 machine_mode mode0, mode1, mode2;
18212 tree type;
18213 HOST_WIDE_INT mask = d->mask;
18215 if ((mask & builtin_mask) != mask)
18217 if (TARGET_DEBUG_BUILTIN)
18218 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
18219 continue;
18222 if (rs6000_overloaded_builtin_p (d->code))
18224 if (! (type = opaque_ftype_opaque_opaque))
18225 type = opaque_ftype_opaque_opaque
18226 = build_function_type_list (opaque_V4SI_type_node,
18227 opaque_V4SI_type_node,
18228 opaque_V4SI_type_node,
18229 NULL_TREE);
18231 else
18233 enum insn_code icode = d->icode;
18234 if (d->name == 0)
18236 if (TARGET_DEBUG_BUILTIN)
18237 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
18238 (long unsigned)i);
18240 continue;
18243 if (icode == CODE_FOR_nothing)
18245 if (TARGET_DEBUG_BUILTIN)
18246 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
18247 d->name);
18249 continue;
18252 mode0 = insn_data[icode].operand[0].mode;
18253 mode1 = insn_data[icode].operand[1].mode;
18254 mode2 = insn_data[icode].operand[2].mode;
18256 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
18258 if (! (type = v2si_ftype_v2si_qi))
18259 type = v2si_ftype_v2si_qi
18260 = build_function_type_list (opaque_V2SI_type_node,
18261 opaque_V2SI_type_node,
18262 char_type_node,
18263 NULL_TREE);
18266 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
18267 && mode2 == QImode)
18269 if (! (type = v2si_ftype_int_qi))
18270 type = v2si_ftype_int_qi
18271 = build_function_type_list (opaque_V2SI_type_node,
18272 integer_type_node,
18273 char_type_node,
18274 NULL_TREE);
18277 else
18278 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
18279 d->code, d->name);
18282 def_builtin (d->name, type, d->code);
18285 /* Add the simple unary operators. */
18286 d = bdesc_1arg;
18287 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18289 machine_mode mode0, mode1;
18290 tree type;
18291 HOST_WIDE_INT mask = d->mask;
18293 if ((mask & builtin_mask) != mask)
18295 if (TARGET_DEBUG_BUILTIN)
18296 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18297 continue;
18300 if (rs6000_overloaded_builtin_p (d->code))
18302 if (! (type = opaque_ftype_opaque))
18303 type = opaque_ftype_opaque
18304 = build_function_type_list (opaque_V4SI_type_node,
18305 opaque_V4SI_type_node,
18306 NULL_TREE);
18308 else
18310 enum insn_code icode = d->icode;
18311 if (d->name == 0)
18313 if (TARGET_DEBUG_BUILTIN)
18314 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
18315 (long unsigned)i);
18317 continue;
18320 if (icode == CODE_FOR_nothing)
18322 if (TARGET_DEBUG_BUILTIN)
18323 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18324 d->name);
18326 continue;
18329 mode0 = insn_data[icode].operand[0].mode;
18330 mode1 = insn_data[icode].operand[1].mode;
18332 if (mode0 == V2SImode && mode1 == QImode)
18334 if (! (type = v2si_ftype_qi))
18335 type = v2si_ftype_qi
18336 = build_function_type_list (opaque_V2SI_type_node,
18337 char_type_node,
18338 NULL_TREE);
18341 else
18342 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18343 d->code, d->name);
18346 def_builtin (d->name, type, d->code);
18349 /* Add the simple no-argument operators. */
18350 d = bdesc_0arg;
18351 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18353 machine_mode mode0;
18354 tree type;
18355 HOST_WIDE_INT mask = d->mask;
18357 if ((mask & builtin_mask) != mask)
18359 if (TARGET_DEBUG_BUILTIN)
18360 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18361 continue;
18363 if (rs6000_overloaded_builtin_p (d->code))
18365 if (!opaque_ftype_opaque)
18366 opaque_ftype_opaque
18367 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18368 type = opaque_ftype_opaque;
18370 else
18372 enum insn_code icode = d->icode;
18373 if (d->name == 0)
18375 if (TARGET_DEBUG_BUILTIN)
18376 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18377 (long unsigned) i);
18378 continue;
18380 if (icode == CODE_FOR_nothing)
18382 if (TARGET_DEBUG_BUILTIN)
18383 fprintf (stderr,
18384 "rs6000_builtin, skip no-argument %s (no code)\n",
18385 d->name);
18386 continue;
18388 mode0 = insn_data[icode].operand[0].mode;
18389 if (mode0 == V2SImode)
18391 /* Code for paired single. */
18392 if (! (type = v2si_ftype))
18394 v2si_ftype
18395 = build_function_type_list (opaque_V2SI_type_node,
18396 NULL_TREE);
18397 type = v2si_ftype;
18400 else
18401 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18402 d->code, d->name);
18404 def_builtin (d->name, type, d->code);
18408 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18409 static void
18410 init_float128_ibm (machine_mode mode)
18412 if (!TARGET_XL_COMPAT)
18414 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18415 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18416 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18417 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18419 if (!TARGET_HARD_FLOAT)
18421 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18422 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18423 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18424 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18425 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18426 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18427 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18428 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18430 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18431 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18432 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18433 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18434 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18435 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18436 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18437 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18440 else
18442 set_optab_libfunc (add_optab, mode, "_xlqadd");
18443 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18444 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18445 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18448 /* Add various conversions for IFmode to use the traditional TFmode
18449 names. */
18450 if (mode == IFmode)
18452 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
18453 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
18454 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
18455 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
18456 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
18457 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
18459 if (TARGET_POWERPC64)
18461 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18462 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18463 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18464 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18469 /* Set up IEEE 128-bit floating point routines. Use different names if the
18470 arguments can be passed in a vector register. The historical PowerPC
18471 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18472 continue to use that if we aren't using vector registers to pass IEEE
18473 128-bit floating point. */
18475 static void
18476 init_float128_ieee (machine_mode mode)
18478 if (FLOAT128_VECTOR_P (mode))
18480 set_optab_libfunc (add_optab, mode, "__addkf3");
18481 set_optab_libfunc (sub_optab, mode, "__subkf3");
18482 set_optab_libfunc (neg_optab, mode, "__negkf2");
18483 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18484 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18485 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18486 set_optab_libfunc (abs_optab, mode, "__abstkf2");
18488 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18489 set_optab_libfunc (ne_optab, mode, "__nekf2");
18490 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18491 set_optab_libfunc (ge_optab, mode, "__gekf2");
18492 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18493 set_optab_libfunc (le_optab, mode, "__lekf2");
18494 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18496 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18497 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18498 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18499 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18501 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
18502 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18503 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
18505 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
18506 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18507 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
18509 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
18510 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
18511 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
18512 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
18513 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
18514 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
18516 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18517 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18518 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18519 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18521 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18522 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18523 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18524 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18526 if (TARGET_POWERPC64)
18528 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18529 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18530 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18531 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18535 else
18537 set_optab_libfunc (add_optab, mode, "_q_add");
18538 set_optab_libfunc (sub_optab, mode, "_q_sub");
18539 set_optab_libfunc (neg_optab, mode, "_q_neg");
18540 set_optab_libfunc (smul_optab, mode, "_q_mul");
18541 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18542 if (TARGET_PPC_GPOPT)
18543 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18545 set_optab_libfunc (eq_optab, mode, "_q_feq");
18546 set_optab_libfunc (ne_optab, mode, "_q_fne");
18547 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18548 set_optab_libfunc (ge_optab, mode, "_q_fge");
18549 set_optab_libfunc (lt_optab, mode, "_q_flt");
18550 set_optab_libfunc (le_optab, mode, "_q_fle");
18552 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18553 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18554 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18555 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18556 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18557 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18558 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18559 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18563 static void
18564 rs6000_init_libfuncs (void)
18566 /* __float128 support. */
18567 if (TARGET_FLOAT128_TYPE)
18569 init_float128_ibm (IFmode);
18570 init_float128_ieee (KFmode);
18573 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18574 if (TARGET_LONG_DOUBLE_128)
18576 if (!TARGET_IEEEQUAD)
18577 init_float128_ibm (TFmode);
18579 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18580 else
18581 init_float128_ieee (TFmode);
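/* Editor's note: an illustrative sketch, not part of this file.  With the
   KFmode mappings registered above, an IEEE 128-bit addition that has no
   hardware instruction available is emitted as a libcall:

       __float128 f (__float128 a, __float128 b) { return a + b; }
         ...compiles to a call...   bl __addkf3

   The assembly shown is schematic; the exact sequence depends on the ABI
   and ISA level.  */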
18585 /* Emit a potentially record-form instruction, setting DST from SRC.
18586 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18587 signed comparison of DST with zero. If DOT is 1, the generated RTL
18588 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18589 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18590 a separate COMPARE. */
18592 void
18593 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18595 if (dot == 0)
18597 emit_move_insn (dst, src);
18598 return;
18601 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18603 emit_move_insn (dst, src);
18604 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18605 return;
18608 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18609 if (dot == 1)
18611 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18612 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18614 else
18616 rtx set = gen_rtx_SET (dst, src);
18617 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
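/* Editor's note (schematic, not in the original): the two PARALLELs built
   above have the shapes

     dot == 1: (parallel [(set CCREG (compare:CC SRC (const_int 0)))
                          (clobber DST)])
     dot == 2: (parallel [(set CCREG (compare:CC SRC (const_int 0)))
                          (set DST SRC)])

   which match the record-form ("dot") patterns in rs6000.md.  */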
18622 /* A validation routine: say whether CODE, a condition code, and MODE
18623 match. The other alternatives either don't make sense or should
18624 never be generated. */
18626 void
18627 validate_condition_mode (enum rtx_code code, machine_mode mode)
18629 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18630 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18631 && GET_MODE_CLASS (mode) == MODE_CC);
18633 /* These don't make sense. */
18634 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18635 || mode != CCUNSmode);
18637 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18638 || mode == CCUNSmode);
18640 gcc_assert (mode == CCFPmode
18641 || (code != ORDERED && code != UNORDERED
18642 && code != UNEQ && code != LTGT
18643 && code != UNGT && code != UNLT
18644 && code != UNGE && code != UNLE));
18646 /* These should never be generated except for
18647 flag_finite_math_only. */
18648 gcc_assert (mode != CCFPmode
18649 || flag_finite_math_only
18650 || (code != LE && code != GE
18651 && code != UNEQ && code != LTGT
18652 && code != UNGT && code != UNLT));
18654 /* These are invalid; the information is not there. */
18655 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18659 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18660 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18661 not zero, store there the bit offset (counted from the right) where
18662 the single stretch of 1 bits begins; and similarly for B, the bit
18663 offset where it ends. */
18665 bool
18666 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18668 unsigned HOST_WIDE_INT val = INTVAL (mask);
18669 unsigned HOST_WIDE_INT bit;
18670 int nb, ne;
18671 int n = GET_MODE_PRECISION (mode);
18673 if (mode != DImode && mode != SImode)
18674 return false;
18676 if (INTVAL (mask) >= 0)
18678 bit = val & -val;
18679 ne = exact_log2 (bit);
18680 nb = exact_log2 (val + bit);
18682 else if (val + 1 == 0)
18684 nb = n;
18685 ne = 0;
18687 else if (val & 1)
18689 val = ~val;
18690 bit = val & -val;
18691 nb = exact_log2 (bit);
18692 ne = exact_log2 (val + bit);
18694 else
18696 bit = val & -val;
18697 ne = exact_log2 (bit);
18698 if (val + bit == 0)
18699 nb = n;
18700 else
18701 nb = 0;
18704 nb--;
18706 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18707 return false;
18709 if (b)
18710 *b = nb;
18711 if (e)
18712 *e = ne;
18714 return true;
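/* Editor's note: a minimal standalone sketch, not part of GCC, of the
   run-of-ones detection above, using uint64_t in place of unsigned
   HOST_WIDE_INT and hard-wiring DImode (n == 64).  ilog2_exact mirrors
   exact_log2; builds with any C99 compiler providing __builtin_ctzll.  */
#if 0
#include <stdio.h>
#include <stdint.h>

static int
ilog2_exact (uint64_t x)   /* log2 (x) if x is a power of 2, else -1.  */
{
  return (x != 0 && (x & (x - 1)) == 0) ? __builtin_ctzll (x) : -1;
}

static int
valid_mask64 (uint64_t val, int *b, int *e)
{
  uint64_t bit;
  int nb, ne, n = 64;

  if ((int64_t) val >= 0)            /* Ordinary run, top bit clear.  */
    {
      bit = val & -val;
      ne = ilog2_exact (bit);
      nb = ilog2_exact (val + bit);
    }
  else if (val + 1 == 0)             /* All ones.  */
    {
      nb = n;
      ne = 0;
    }
  else if (val & 1)                  /* Wrap-around run.  */
    {
      val = ~val;
      bit = val & -val;
      nb = ilog2_exact (bit);
      ne = ilog2_exact (val + bit);
    }
  else                               /* Run ending at the top bit.  */
    {
      bit = val & -val;
      ne = ilog2_exact (bit);
      nb = (val + bit == 0) ? n : 0;
    }

  nb--;
  if (nb < 0 || ne < 0 || nb >= n || ne >= n)
    return 0;
  if (b) *b = nb;
  if (e) *e = ne;
  return 1;
}

int
main (void)
{
  int b, e;
  if (valid_mask64 (0x000ff000, &b, &e))
    printf ("0x000ff000: run from bit %d up to bit %d\n", e, b); /* 12..19 */
  printf ("0x00ff00ff: %s\n",        /* Two separate runs: invalid.  */
          valid_mask64 (0x00ff00ff, &b, &e) ? "valid" : "invalid");
  return 0;
}
#endif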
18717 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18718 or rldicr instruction, to implement an AND with it in mode MODE. */
18720 bool
18721 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18723 int nb, ne;
18725 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18726 return false;
18728 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18729 does not wrap. */
18730 if (mode == DImode)
18731 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18733 /* For SImode, rlwinm can do everything. */
18734 if (mode == SImode)
18735 return (nb < 32 && ne < 32);
18737 return false;
18740 /* Return the instruction template for an AND with mask in mode MODE, with
18741 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18743 const char *
18744 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18746 int nb, ne;
18748 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18749 gcc_unreachable ();
18751 if (mode == DImode && ne == 0)
18753 operands[3] = GEN_INT (63 - nb);
18754 if (dot)
18755 return "rldicl. %0,%1,0,%3";
18756 return "rldicl %0,%1,0,%3";
18759 if (mode == DImode && nb == 63)
18761 operands[3] = GEN_INT (63 - ne);
18762 if (dot)
18763 return "rldicr. %0,%1,0,%3";
18764 return "rldicr %0,%1,0,%3";
18767 if (nb < 32 && ne < 32)
18769 operands[3] = GEN_INT (31 - nb);
18770 operands[4] = GEN_INT (31 - ne);
18771 if (dot)
18772 return "rlwinm. %0,%1,0,%3,%4";
18773 return "rlwinm %0,%1,0,%3,%4";
18776 gcc_unreachable ();
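/* Editor's note (worked example, not in the original): for DImode with
   mask 0x00000000ffffffff, rs6000_is_valid_mask gives ne == 0 and
   nb == 31, so the first case above applies and the template becomes
   "rldicl %0,%1,0,32" (63 - 31 == 32): rotate by zero and clear the 32
   leftmost bits, i.e. AND with the low-word mask.  */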
18779 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18780 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18781 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18783 bool
18784 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18786 int nb, ne;
18788 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18789 return false;
18791 int n = GET_MODE_PRECISION (mode);
18792 int sh = -1;
18794 if (CONST_INT_P (XEXP (shift, 1)))
18796 sh = INTVAL (XEXP (shift, 1));
18797 if (sh < 0 || sh >= n)
18798 return false;
18801 rtx_code code = GET_CODE (shift);
18803 /* Convert any shift by 0 to a rotate, to simplify below code. */
18804 if (sh == 0)
18805 code = ROTATE;
18807 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18808 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18809 code = ASHIFT;
18810 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18812 code = LSHIFTRT;
18813 sh = n - sh;
18816 /* DImode rotates need rld*. */
18817 if (mode == DImode && code == ROTATE)
18818 return (nb == 63 || ne == 0 || ne == sh);
18820 /* SImode rotates need rlw*. */
18821 if (mode == SImode && code == ROTATE)
18822 return (nb < 32 && ne < 32 && sh < 32);
18824 /* Wrap-around masks are only okay for rotates. */
18825 if (ne > nb)
18826 return false;
18828 /* Variable shifts are only okay for rotates. */
18829 if (sh < 0)
18830 return false;
18832 /* Don't allow ASHIFT if the mask is wrong for that. */
18833 if (code == ASHIFT && ne < sh)
18834 return false;
18836 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18837 if the mask is wrong for that. */
18838 if (nb < 32 && ne < 32 && sh < 32
18839 && !(code == LSHIFTRT && nb >= 32 - sh))
18840 return true;
18842 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18843 if the mask is wrong for that. */
18844 if (code == LSHIFTRT)
18845 sh = 64 - sh;
18846 if (nb == 63 || ne == 0 || ne == sh)
18847 return !(code == LSHIFTRT && nb >= sh);
18849 return false;
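/* Editor's note (worked example, not in the original): DImode
   (ashift X 4) with mask 0xfffffffffffffff0 has b == 63, e == 4.  The
   ASHIFT test passes (e == sh, not e < sh) and the nb == 63 case accepts
   it; rs6000_insn_for_shift_mask below renders this as
   "rldicr %0,%1,4,59", i.e. a plain sldi.  */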
18852 /* Return the instruction template for a shift with mask in mode MODE, with
18853 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18855 const char *
18856 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18858 int nb, ne;
18860 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18861 gcc_unreachable ();
18863 if (mode == DImode && ne == 0)
18865 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18866 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18867 operands[3] = GEN_INT (63 - nb);
18868 if (dot)
18869 return "rld%I2cl. %0,%1,%2,%3";
18870 return "rld%I2cl %0,%1,%2,%3";
18873 if (mode == DImode && nb == 63)
18875 operands[3] = GEN_INT (63 - ne);
18876 if (dot)
18877 return "rld%I2cr. %0,%1,%2,%3";
18878 return "rld%I2cr %0,%1,%2,%3";
18881 if (mode == DImode
18882 && GET_CODE (operands[4]) != LSHIFTRT
18883 && CONST_INT_P (operands[2])
18884 && ne == INTVAL (operands[2]))
18886 operands[3] = GEN_INT (63 - nb);
18887 if (dot)
18888 return "rld%I2c. %0,%1,%2,%3";
18889 return "rld%I2c %0,%1,%2,%3";
18892 if (nb < 32 && ne < 32)
18894 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18895 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18896 operands[3] = GEN_INT (31 - nb);
18897 operands[4] = GEN_INT (31 - ne);
18898 /* This insn can also be a 64-bit rotate with mask that really makes
18899 it just a shift right (with mask); the %h below are to adjust for
18900 that situation (shift count is >= 32 in that case). */
18901 if (dot)
18902 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18903 return "rlw%I2nm %0,%1,%h2,%3,%4";
18906 gcc_unreachable ();
18909 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18910 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18911 ASHIFT, or LSHIFTRT) in mode MODE. */
18913 bool
18914 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18916 int nb, ne;
18918 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18919 return false;
18921 int n = GET_MODE_PRECISION (mode);
18923 int sh = INTVAL (XEXP (shift, 1));
18924 if (sh < 0 || sh >= n)
18925 return false;
18927 rtx_code code = GET_CODE (shift);
18929 /* Convert any shift by 0 to a rotate, to simplify below code. */
18930 if (sh == 0)
18931 code = ROTATE;
18933 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18934 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18935 code = ASHIFT;
18936 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18938 code = LSHIFTRT;
18939 sh = n - sh;
18942 /* DImode rotates need rldimi. */
18943 if (mode == DImode && code == ROTATE)
18944 return (ne == sh);
18946 /* SImode rotates need rlwimi. */
18947 if (mode == SImode && code == ROTATE)
18948 return (nb < 32 && ne < 32 && sh < 32);
18950 /* Wrap-around masks are only okay for rotates. */
18951 if (ne > nb)
18952 return false;
18954 /* Don't allow ASHIFT if the mask is wrong for that. */
18955 if (code == ASHIFT && ne < sh)
18956 return false;
18958 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18959 if the mask is wrong for that. */
18960 if (nb < 32 && ne < 32 && sh < 32
18961 && !(code == LSHIFTRT && nb >= 32 - sh))
18962 return true;
18964 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18965 if the mask is wrong for that. */
18966 if (code == LSHIFTRT)
18967 sh = 64 - sh;
18968 if (ne == sh)
18969 return !(code == LSHIFTRT && nb >= sh);
18971 return false;
18974 /* Return the instruction template for an insert with mask in mode MODE, with
18975 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18977 const char *
18978 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18980 int nb, ne;
18982 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18983 gcc_unreachable ();
18985 /* Prefer rldimi because rlwimi is cracked. */
18986 if (TARGET_POWERPC64
18987 && (!dot || mode == DImode)
18988 && GET_CODE (operands[4]) != LSHIFTRT
18989 && ne == INTVAL (operands[2]))
18991 operands[3] = GEN_INT (63 - nb);
18992 if (dot)
18993 return "rldimi. %0,%1,%2,%3";
18994 return "rldimi %0,%1,%2,%3";
18997 if (nb < 32 && ne < 32)
18999 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
19000 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
19001 operands[3] = GEN_INT (31 - nb);
19002 operands[4] = GEN_INT (31 - ne);
19003 if (dot)
19004 return "rlwimi. %0,%1,%2,%3,%4";
19005 return "rlwimi %0,%1,%2,%3,%4";
19008 gcc_unreachable ();
19011 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
19012 using two machine instructions. */
19014 bool
19015 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
19017 /* There are two kinds of AND we can handle with two insns:
19018 1) those we can do with two rl* insns;
19019 2) ori[s];xori[s].
19021 We do not handle that last case yet. */
19023 /* If there is just one stretch of ones, we can do it. */
19024 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
19025 return true;
19027 /* Otherwise, fill in the lowest "hole"; if we can do the result with
19028 one insn, we can do the whole thing with two. */
19029 unsigned HOST_WIDE_INT val = INTVAL (c);
19030 unsigned HOST_WIDE_INT bit1 = val & -val;
19031 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19032 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19033 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19034 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
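/* Editor's note (worked example, not in the original): for DImode
   c = 0x0ff00ff0 (two runs of ones):

     bit1 = val & -val          = 0x00000010   lowest 1 bit
     bit2 = (val + bit1) & ~val = 0x00001000   lowest 0 bit of the hole
     val1 = (val + bit1) & val  = 0x0ff00000   val with its lowest run gone
     bit3 = val1 & -val1        = 0x00100000   first 1 bit above the hole

   val + bit3 - bit2 == 0x0ffffff0 fills the hole with ones, leaving a
   single run, which rs6000_is_valid_and_mask accepts; so this AND can be
   done in two insns.  */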
19037 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
19038 If EXPAND is true, split rotate-and-mask instructions we generate to
19039 their constituent parts as well (this is used during expand); if DOT
19040 is 1, make the last insn a record-form instruction clobbering the
19041 destination GPR and setting the CC reg (from operands[3]); if 2, set
19042 that GPR as well as the CC reg. */
19044 void
19045 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
19047 gcc_assert (!(expand && dot));
19049 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
19051 /* If it is one stretch of ones, it is DImode; shift left, mask, then
19052 shift right. This generates better code than doing the masks without
19053 shifts, or shifting first right and then left. */
19054 int nb, ne;
19055 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
19057 gcc_assert (mode == DImode);
19059 int shift = 63 - nb;
19060 if (expand)
19062 rtx tmp1 = gen_reg_rtx (DImode);
19063 rtx tmp2 = gen_reg_rtx (DImode);
19064 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
19065 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
19066 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
19068 else
19070 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
19071 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
19072 emit_move_insn (operands[0], tmp);
19073 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
19074 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19076 return;
19079 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
19080 that does the rest. */
19081 unsigned HOST_WIDE_INT bit1 = val & -val;
19082 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
19083 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
19084 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
19086 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
19087 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
19089 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
19091 /* Two "no-rotate"-and-mask instructions, for SImode. */
19092 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
19094 gcc_assert (mode == SImode);
19096 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19097 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
19098 emit_move_insn (reg, tmp);
19099 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19100 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19101 return;
19104 gcc_assert (mode == DImode);
19106 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
19107 insns; we have to do the first in SImode, because it wraps. */
19108 if (mask2 <= 0xffffffff
19109 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
19111 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
19112 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
19113 GEN_INT (mask1));
19114 rtx reg_low = gen_lowpart (SImode, reg);
19115 emit_move_insn (reg_low, tmp);
19116 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
19117 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
19118 return;
19121 /* Two rld* insns: rotate, clear the hole in the middle (which now is
19122 at the top end), rotate back and clear the other hole. */
19123 int right = exact_log2 (bit3);
19124 int left = 64 - right;
19126 /* Rotate the mask too. */
19127 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
19129 if (expand)
19131 rtx tmp1 = gen_reg_rtx (DImode);
19132 rtx tmp2 = gen_reg_rtx (DImode);
19133 rtx tmp3 = gen_reg_rtx (DImode);
19134 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
19135 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
19136 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
19137 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
19139 else
19141 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
19142 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
19143 emit_move_insn (operands[0], tmp);
19144 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
19145 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
19146 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
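/* Editor's note (schematic, not in the original): in the final case above,
   each (ROTATE, AND-with-one-run) pair matches a single rotate-and-mask
   instruction, so the generated code has the two-insn shape

       rld*    dst,src,LEFT,...    ; rotate the hole to the top, clear it
       rld*    dst,dst,RIGHT,...   ; rotate back, clear the other hole

   with LEFT + RIGHT == 64; the exact rl* forms depend on the masks.  */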
19150 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
19151 for lfq and stfq insns iff the registers are hard registers. */
19153 int
19154 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
19156 /* We might have been passed a SUBREG. */
19157 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
19158 return 0;
19160 /* We might have been passed non-floating-point registers. */
19161 if (!FP_REGNO_P (REGNO (reg1))
19162 || !FP_REGNO_P (REGNO (reg2)))
19163 return 0;
19165 return (REGNO (reg1) == REGNO (reg2) - 1);
19168 /* Return 1 if addr1 and addr2 are suitable for an lfq or stfq insn.
19169 addr1 and addr2 must be in consecutive memory locations
19170 (addr2 == addr1 + 8). */
19172 int
19173 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
19175 rtx addr1, addr2;
19176 unsigned int reg1, reg2;
19177 int offset1, offset2;
19179 /* The mems cannot be volatile. */
19180 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
19181 return 0;
19183 addr1 = XEXP (mem1, 0);
19184 addr2 = XEXP (mem2, 0);
19186 /* Extract an offset (if used) from the first addr. */
19187 if (GET_CODE (addr1) == PLUS)
19189 /* If not a REG, return zero. */
19190 if (GET_CODE (XEXP (addr1, 0)) != REG)
19191 return 0;
19192 else
19194 reg1 = REGNO (XEXP (addr1, 0));
19195 /* The offset must be constant! */
19196 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
19197 return 0;
19198 offset1 = INTVAL (XEXP (addr1, 1));
19201 else if (GET_CODE (addr1) != REG)
19202 return 0;
19203 else
19205 reg1 = REGNO (addr1);
19206 /* This was a simple (mem (reg)) expression. Offset is 0. */
19207 offset1 = 0;
19210 /* And now for the second addr. */
19211 if (GET_CODE (addr2) == PLUS)
19213 /* If not a REG, return zero. */
19214 if (GET_CODE (XEXP (addr2, 0)) != REG)
19215 return 0;
19216 else
19218 reg2 = REGNO (XEXP (addr2, 0));
19219 /* The offset must be constant. */
19220 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
19221 return 0;
19222 offset2 = INTVAL (XEXP (addr2, 1));
19225 else if (GET_CODE (addr2) != REG)
19226 return 0;
19227 else
19229 reg2 = REGNO (addr2);
19230 /* This was a simple (mem (reg)) expression. Offset is 0. */
19231 offset2 = 0;
19234 /* Both of these must have the same base register. */
19235 if (reg1 != reg2)
19236 return 0;
19238 /* The offset for the second addr must be 8 more than the first addr. */
19239 if (offset2 != offset1 + 8)
19240 return 0;
19242 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
19243 instructions. */
19244 return 1;
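/* Editor's note (illustrative example, not in the original): the pair

       (mem:DF (plus (reg 9) (const_int 16)))
       (mem:DF (plus (reg 9) (const_int 24)))

   qualifies: both offsets are constant, the base registers match, and
   offset2 == offset1 + 8, so the two accesses can become one lfq/stfq.  */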
19247 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
19248 need to use DDmode, in all other cases we can use the same mode. */
19249 static machine_mode
19250 rs6000_secondary_memory_needed_mode (machine_mode mode)
19252 if (lra_in_progress && mode == SDmode)
19253 return DDmode;
19254 return mode;
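/* Editor's note (rationale, not in the original): SDmode is a 4-byte
   value, but in a floating point register it is kept in DDmode format and
   occupies the full 64-bit register, so a spill slot used to move it
   between register files during LRA must be 8 bytes wide; hence the
   DDmode override above.  */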
19257 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19258 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19259 only work on the traditional altivec registers, note if an altivec register
19260 was chosen. */
19262 static enum rs6000_reg_type
19263 register_to_reg_type (rtx reg, bool *is_altivec)
19265 HOST_WIDE_INT regno;
19266 enum reg_class rclass;
19268 if (GET_CODE (reg) == SUBREG)
19269 reg = SUBREG_REG (reg);
19271 if (!REG_P (reg))
19272 return NO_REG_TYPE;
19274 regno = REGNO (reg);
19275 if (regno >= FIRST_PSEUDO_REGISTER)
19277 if (!lra_in_progress && !reload_completed)
19278 return PSEUDO_REG_TYPE;
19280 regno = true_regnum (reg);
19281 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
19282 return PSEUDO_REG_TYPE;
19285 gcc_assert (regno >= 0);
19287 if (is_altivec && ALTIVEC_REGNO_P (regno))
19288 *is_altivec = true;
19290 rclass = rs6000_regno_regclass[regno];
19291 return reg_class_to_reg_type[(int)rclass];
19294 /* Helper function to return the cost of adding a TOC entry address. */
19296 static inline int
19297 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19299 int ret;
19301 if (TARGET_CMODEL != CMODEL_SMALL)
19302 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19304 else
19305 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19307 return ret;
19310 /* Helper function for rs6000_secondary_reload to determine whether the memory
19311 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19312 needs reloading. Return negative if the address is not handled by the
19313 memory helper functions (the caller should try a different reload
19314 method), 0 if no additional instructions are needed, and positive to
19315 give the extra cost of the memory access. */
19317 static int
19318 rs6000_secondary_reload_memory (rtx addr,
19319 enum reg_class rclass,
19320 machine_mode mode)
19322 int extra_cost = 0;
19323 rtx reg, and_arg, plus_arg0, plus_arg1;
19324 addr_mask_type addr_mask;
19325 const char *type = NULL;
19326 const char *fail_msg = NULL;
19328 if (GPR_REG_CLASS_P (rclass))
19329 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19331 else if (rclass == FLOAT_REGS)
19332 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19334 else if (rclass == ALTIVEC_REGS)
19335 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19337 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19338 else if (rclass == VSX_REGS)
19339 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19340 & ~RELOAD_REG_AND_M16);
19342 /* If the register allocator hasn't made up its mind yet on the register
19343 class to use, settle on reasonable defaults. */
19344 else if (rclass == NO_REGS)
19346 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19347 & ~RELOAD_REG_AND_M16);
19349 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19350 addr_mask &= ~(RELOAD_REG_INDEXED
19351 | RELOAD_REG_PRE_INCDEC
19352 | RELOAD_REG_PRE_MODIFY);
19355 else
19356 addr_mask = 0;
19358 /* If the register isn't valid in this register class, just return now. */
19359 if ((addr_mask & RELOAD_REG_VALID) == 0)
19361 if (TARGET_DEBUG_ADDR)
19363 fprintf (stderr,
19364 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19365 "not valid in class\n",
19366 GET_MODE_NAME (mode), reg_class_names[rclass]);
19367 debug_rtx (addr);
19370 return -1;
19373 switch (GET_CODE (addr))
19375 /* Does the register class support auto update forms for this mode? We
19376 don't need a scratch register, since PowerPC only supports
19377 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19378 case PRE_INC:
19379 case PRE_DEC:
19380 reg = XEXP (addr, 0);
19381 if (!base_reg_operand (reg, GET_MODE (reg)))
19383 fail_msg = "no base register #1";
19384 extra_cost = -1;
19387 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19389 extra_cost = 1;
19390 type = "update";
19392 break;
19394 case PRE_MODIFY:
19395 reg = XEXP (addr, 0);
19396 plus_arg1 = XEXP (addr, 1);
19397 if (!base_reg_operand (reg, GET_MODE (reg))
19398 || GET_CODE (plus_arg1) != PLUS
19399 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19401 fail_msg = "bad PRE_MODIFY";
19402 extra_cost = -1;
19405 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19407 extra_cost = 1;
19408 type = "update";
19410 break;
19412 /* Do we need to simulate AND -16 to clear the bottom address bits used
19413 in VMX load/stores? Only allow the AND for vector sizes. */
19414 case AND:
19415 and_arg = XEXP (addr, 0);
19416 if (GET_MODE_SIZE (mode) != 16
19417 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19418 || INTVAL (XEXP (addr, 1)) != -16)
19420 fail_msg = "bad Altivec AND #1";
19421 extra_cost = -1;
19424 if (rclass != ALTIVEC_REGS)
19426 if (legitimate_indirect_address_p (and_arg, false))
19427 extra_cost = 1;
19429 else if (legitimate_indexed_address_p (and_arg, false))
19430 extra_cost = 2;
19432 else
19434 fail_msg = "bad Altivec AND #2";
19435 extra_cost = -1;
19438 type = "and";
19440 break;
19442 /* If this is an indirect address, make sure it is a base register. */
19443 case REG:
19444 case SUBREG:
19445 if (!legitimate_indirect_address_p (addr, false))
19447 extra_cost = 1;
19448 type = "move";
19450 break;
19452 /* If this is an indexed address, make sure the register class can handle
19453 indexed addresses for this mode. */
19454 case PLUS:
19455 plus_arg0 = XEXP (addr, 0);
19456 plus_arg1 = XEXP (addr, 1);
19458 /* (plus (plus (reg) (constant)) (constant)) is generated during
19459 push_reload processing, so handle it now. */
19460 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19462 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19464 extra_cost = 1;
19465 type = "offset";
19469 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19470 push_reload processing, so handle it now. */
19471 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19473 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19475 extra_cost = 1;
19476 type = "indexed #2";
19480 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19482 fail_msg = "no base register #2";
19483 extra_cost = -1;
19486 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19488 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19489 || !legitimate_indexed_address_p (addr, false))
19491 extra_cost = 1;
19492 type = "indexed";
19496 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19497 && CONST_INT_P (plus_arg1))
19499 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19501 extra_cost = 1;
19502 type = "vector d-form offset";
19506 /* Make sure the register class can handle offset addresses. */
19507 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19509 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19511 extra_cost = 1;
19512 type = "offset #2";
19516 else
19518 fail_msg = "bad PLUS";
19519 extra_cost = -1;
19522 break;
19524 case LO_SUM:
19525 /* Quad offsets are restricted and can't handle normal addresses. */
19526 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19528 extra_cost = -1;
19529 type = "vector d-form lo_sum";
19532 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19534 fail_msg = "bad LO_SUM";
19535 extra_cost = -1;
19538 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19540 extra_cost = 1;
19541 type = "lo_sum";
19543 break;
19545 /* Static addresses need to create a TOC entry. */
19546 case CONST:
19547 case SYMBOL_REF:
19548 case LABEL_REF:
19549 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19551 extra_cost = -1;
19552 type = "vector d-form lo_sum #2";
19555 else
19557 type = "address";
19558 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19560 break;
19562 /* TOC references look like offsetable memory. */
19563 case UNSPEC:
19564 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19566 fail_msg = "bad UNSPEC";
19567 extra_cost = -1;
19570 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19572 extra_cost = -1;
19573 type = "vector d-form lo_sum #3";
19576 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19578 extra_cost = 1;
19579 type = "toc reference";
19581 break;
19583 default:
19585 fail_msg = "bad address";
19586 extra_cost = -1;
19590 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19592 if (extra_cost < 0)
19593 fprintf (stderr,
19594 "rs6000_secondary_reload_memory error: mode = %s, "
19595 "class = %s, addr_mask = '%s', %s\n",
19596 GET_MODE_NAME (mode),
19597 reg_class_names[rclass],
19598 rs6000_debug_addr_mask (addr_mask, false),
19599 (fail_msg != NULL) ? fail_msg : "<bad address>");
19601 else
19602 fprintf (stderr,
19603 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19604 "addr_mask = '%s', extra cost = %d, %s\n",
19605 GET_MODE_NAME (mode),
19606 reg_class_names[rclass],
19607 rs6000_debug_addr_mask (addr_mask, false),
19608 extra_cost,
19609 (type) ? type : "<none>");
19611 debug_rtx (addr);
19614 return extra_cost;
19617 /* Helper function for rs6000_secondary_reload to return true if a move to a
19618 different register class is really a simple move. */
19620 static bool
19621 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19622 enum rs6000_reg_type from_type,
19623 machine_mode mode)
19625 int size = GET_MODE_SIZE (mode);
19627 /* Add support for the various direct moves available. In this function, we
19628 only look at cases where no extra registers are needed, and one or more
19629 simple move insns are issued. Historically, small integers were not
19630 allowed in FPR/VSX registers. Single precision binary floating point is
19631 not a simple move because we must convert to the single precision memory
19632 layout. The 4-byte SDmode can be moved. TDmode values are disallowed
19633 since they need special direct move handling, which we do not support yet. */
19634 if (TARGET_DIRECT_MOVE
19635 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19636 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19638 if (TARGET_POWERPC64)
19640 /* ISA 2.07: MTVSRD or MVFVSRD. */
19641 if (size == 8)
19642 return true;
19644 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19645 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19646 return true;
19649 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19650 if (TARGET_P8_VECTOR)
19652 if (mode == SImode)
19653 return true;
19655 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19656 return true;
19659 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19660 if (mode == SDmode)
19661 return true;
19664 /* Power6+: MFTGPR or MFFGPR. */
19665 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19666 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19667 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19668 return true;
19670 /* Move to/from SPR. */
19671 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19672 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19673 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19674 return true;
19676 return false;
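/* A minimal usage sketch (illustrative only, not part of the original
   file): on a 64-bit ISA 2.07 target a DImode GPR <-> VSX copy needs no
   scratch register, so the helper above reports it as a simple move.  */
#if 0
/* Hypothetical example function, named for illustration only.  */
static bool
example_di_direct_move_is_simple (void)
{
  /* True when a single mtvsrd/mfvsrd can do the copy.  */
  return rs6000_secondary_reload_simple_move (VSX_REG_TYPE, GPR_REG_TYPE,
					      DImode);
}
#endif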
19679 /* Direct move helper function for rs6000_secondary_reload. Handle all of the
19680 special direct moves that involve allocating an extra register. Return true
19681 if such a helper pattern exists, storing its insn code and extra cost in
19682 SRI; return false otherwise. */
19684 static bool
19685 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19686 enum rs6000_reg_type from_type,
19687 machine_mode mode,
19688 secondary_reload_info *sri,
19689 bool altivec_p)
19691 bool ret = false;
19692 enum insn_code icode = CODE_FOR_nothing;
19693 int cost = 0;
19694 int size = GET_MODE_SIZE (mode);
19696 if (TARGET_POWERPC64 && size == 16)
19698 /* Handle moving 128-bit values from GPRs to VSX registers on
19699 ISA 2.07 (power8, power9) when running in 64-bit mode using
19700 XXPERMDI to glue the two 64-bit values back together. */
19701 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19703 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19704 icode = reg_addr[mode].reload_vsx_gpr;
19707 /* Handle moving 128-bit values from VSX registers to GPRs on
19708 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19709 bottom 64-bit value. */
19710 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19712 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19713 icode = reg_addr[mode].reload_gpr_vsx;
19717 else if (TARGET_POWERPC64 && mode == SFmode)
19719 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19721 cost = 3; /* xscvdpspn, mfvsrd, and. */
19722 icode = reg_addr[mode].reload_gpr_vsx;
19725 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19727 cost = 2; /* mtvsrz, xscvspdpn. */
19728 icode = reg_addr[mode].reload_vsx_gpr;
19732 else if (!TARGET_POWERPC64 && size == 8)
19734 /* Handle moving 64-bit values from GPRs to floating point registers on
19735 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19736 32-bit values back together. Altivec register classes must be handled
19737 specially since a different instruction is used, and the secondary
19738 reload support requires a single instruction class in the scratch
19739 register constraint. However, right now TFmode is not allowed in
19740 Altivec registers, so the pattern will never match. */
19741 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19743 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19744 icode = reg_addr[mode].reload_fpr_gpr;
19748 if (icode != CODE_FOR_nothing)
19750 ret = true;
19751 if (sri)
19753 sri->icode = icode;
19754 sri->extra_cost = cost;
19758 return ret;
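/* Illustrative query (a sketch, not from the original source): for a
   TImode GPR -> VSX copy on a 64-bit ISA 2.07 target, the helper above
   selects the reload_vsx_gpr pattern and reports an extra cost of 3
   (two mtvsrd's plus one xxpermdi).  */
#if 0
/* Hypothetical example function, named for illustration only.  */
static void
example_ti_direct_move_query (secondary_reload_info *sri)
{
  if (rs6000_secondary_reload_direct_move (VSX_REG_TYPE, GPR_REG_TYPE,
					   TImode, sri, false))
    gcc_assert (sri->extra_cost == 3);
}
#endif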
19761 /* Return whether a move between two register classes can be done either
19762 directly (simple move) or via a pattern that uses a single extra temporary
19763 (using ISA 2.07's direct move in this case). */
19765 static bool
19766 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19767 enum rs6000_reg_type from_type,
19768 machine_mode mode,
19769 secondary_reload_info *sri,
19770 bool altivec_p)
19772 /* Fall back to load/store reloads if either type is not a register. */
19773 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19774 return false;
19776 /* If we haven't allocated registers yet, assume the move can be done for the
19777 standard register types. */
19778 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19779 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19780 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19781 return true;
19783 /* A move within the same set of registers is a simple move for non-specialized
19784 registers. */
19785 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19786 return true;
19788 /* Check whether a simple move can be done directly. */
19789 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19791 if (sri)
19793 sri->icode = CODE_FOR_nothing;
19794 sri->extra_cost = 0;
19796 return true;
19799 /* Now check if we can do it in a few steps. */
19800 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19801 altivec_p);
19804 /* Inform reload about cases where moving X with a mode MODE to a register in
19805 RCLASS requires an extra scratch or immediate register. Return the class
19806 needed for the immediate register.
19808 For VSX and Altivec, we may need a register to convert sp+offset into
19809 reg+sp.
19811 For misaligned 64-bit gpr loads and stores we need a register to
19812 convert an offset address to indirect. */
19814 static reg_class_t
19815 rs6000_secondary_reload (bool in_p,
19816 rtx x,
19817 reg_class_t rclass_i,
19818 machine_mode mode,
19819 secondary_reload_info *sri)
19821 enum reg_class rclass = (enum reg_class) rclass_i;
19822 reg_class_t ret = ALL_REGS;
19823 enum insn_code icode;
19824 bool default_p = false;
19825 bool done_p = false;
19827 /* Allow subreg of memory before/during reload. */
19828 bool memory_p = (MEM_P (x)
19829 || (!reload_completed && GET_CODE (x) == SUBREG
19830 && MEM_P (SUBREG_REG (x))));
19832 sri->icode = CODE_FOR_nothing;
19833 sri->t_icode = CODE_FOR_nothing;
19834 sri->extra_cost = 0;
19835 icode = ((in_p)
19836 ? reg_addr[mode].reload_load
19837 : reg_addr[mode].reload_store);
19839 if (REG_P (x) || register_operand (x, mode))
19841 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19842 bool altivec_p = (rclass == ALTIVEC_REGS);
19843 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19845 if (!in_p)
19846 std::swap (to_type, from_type);
19848 /* Can we do a direct move of some sort? */
19849 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19850 altivec_p))
19852 icode = (enum insn_code)sri->icode;
19853 default_p = false;
19854 done_p = true;
19855 ret = NO_REGS;
19859 /* Make sure 0.0 is not reloaded or forced into memory. */
19860 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19862 ret = NO_REGS;
19863 default_p = false;
19864 done_p = true;
19867 /* If this is a scalar floating point value and we want to load it into the
19868 traditional Altivec registers, move it via a traditional floating
19869 point register, unless we have D-form addressing. Also make sure that
19870 non-zero constants use a FPR. */
19871 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19872 && !mode_supports_vmx_dform (mode)
19873 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19874 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19876 ret = FLOAT_REGS;
19877 default_p = false;
19878 done_p = true;
19881 /* Handle reload of load/stores if we have reload helper functions. */
19882 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19884 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19885 mode);
19887 if (extra_cost >= 0)
19889 done_p = true;
19890 ret = NO_REGS;
19891 if (extra_cost > 0)
19893 sri->extra_cost = extra_cost;
19894 sri->icode = icode;
19899 /* Handle unaligned loads and stores of integer registers. */
19900 if (!done_p && TARGET_POWERPC64
19901 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19902 && memory_p
19903 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19905 rtx addr = XEXP (x, 0);
19906 rtx off = address_offset (addr);
19908 if (off != NULL_RTX)
19910 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19911 unsigned HOST_WIDE_INT offset = INTVAL (off);
19913 /* We need a secondary reload when our legitimate_address_p
19914 says the address is good (as otherwise the entire address
19915 will be reloaded), and the offset is not a multiple of
19916 four or we have an address wrap. Address wrap will only
19917 occur for LO_SUMs since legitimate_offset_address_p
19918 rejects addresses for 16-byte mems that will wrap. */
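/* Worked example (editorial): a DImode load from r1+10 passes
   legitimate_address_p, since 10 fits the 16-bit D field, but
   10 & 3 != 0 rules out the DS-form ld instruction, so the test
   below requests a reload_di_load secondary reload.  */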
19919 if (GET_CODE (addr) == LO_SUM
19920 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19921 && ((offset & 3) != 0
19922 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19923 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19924 && (offset & 3) != 0))
19926 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19927 if (in_p)
19928 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19929 : CODE_FOR_reload_di_load);
19930 else
19931 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19932 : CODE_FOR_reload_di_store);
19933 sri->extra_cost = 2;
19934 ret = NO_REGS;
19935 done_p = true;
19937 else
19938 default_p = true;
19940 else
19941 default_p = true;
19944 if (!done_p && !TARGET_POWERPC64
19945 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19946 && memory_p
19947 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19949 rtx addr = XEXP (x, 0);
19950 rtx off = address_offset (addr);
19952 if (off != NULL_RTX)
19954 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19955 unsigned HOST_WIDE_INT offset = INTVAL (off);
19957 /* We need a secondary reload when our legitimate_address_p
19958 says the address is good (as otherwise the entire address
19959 will be reloaded), and we have a wrap.
19961 legitimate_lo_sum_address_p allows LO_SUM addresses to
19962 have any offset so test for wrap in the low 16 bits.
19964 legitimate_offset_address_p checks for the range
19965 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19966 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19967 [0x7ff4,0x7fff] respectively, so test for the
19968 intersection of these ranges, [0x7ffc,0x7fff] and
19969 [0x7ff4,0x7ff7] respectively.
19971 Note that the address we see here may have been
19972 manipulated by legitimize_reload_address. */
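/* Worked example (editorial): a DImode access (size 8, extra 4) at
   r1+0x7ffc is accepted by legitimate_offset_address_p, but its second
   word would sit at 0x8000, outside the signed 16-bit D field; here
   0x7ffc - (0x8000 - 4) = 0 < UNITS_PER_WORD, so the test fires.  */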
19973 if (GET_CODE (addr) == LO_SUM
19974 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19975 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19977 if (in_p)
19978 sri->icode = CODE_FOR_reload_si_load;
19979 else
19980 sri->icode = CODE_FOR_reload_si_store;
19981 sri->extra_cost = 2;
19982 ret = NO_REGS;
19983 done_p = true;
19985 else
19986 default_p = true;
19988 else
19989 default_p = true;
19992 if (!done_p)
19993 default_p = true;
19995 if (default_p)
19996 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19998 gcc_assert (ret != ALL_REGS);
20000 if (TARGET_DEBUG_ADDR)
20002 fprintf (stderr,
20003 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
20004 "mode = %s",
20005 reg_class_names[ret],
20006 in_p ? "true" : "false",
20007 reg_class_names[rclass],
20008 GET_MODE_NAME (mode));
20010 if (reload_completed)
20011 fputs (", after reload", stderr);
20013 if (!done_p)
20014 fputs (", done_p not set", stderr);
20016 if (default_p)
20017 fputs (", default secondary reload", stderr);
20019 if (sri->icode != CODE_FOR_nothing)
20020 fprintf (stderr, ", reload func = %s, extra cost = %d",
20021 insn_data[sri->icode].name, sri->extra_cost);
20023 else if (sri->extra_cost > 0)
20024 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
20026 fputs ("\n", stderr);
20027 debug_rtx (x);
20030 return ret;
20033 /* Better tracing for rs6000_secondary_reload_inner. */
20035 static void
20036 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
20037 bool store_p)
20039 rtx set, clobber;
20041 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
20043 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
20044 store_p ? "store" : "load");
20046 if (store_p)
20047 set = gen_rtx_SET (mem, reg);
20048 else
20049 set = gen_rtx_SET (reg, mem);
20051 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
20052 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
20055 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
20056 ATTRIBUTE_NORETURN;
20058 static void
20059 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
20060 bool store_p)
20062 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
20063 gcc_unreachable ();
20066 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
20067 reload helper functions. These were identified in
20068 rs6000_secondary_reload_memory, and if reload decided to use the secondary
20069 reload, it calls the insns:
20070 reload_<RELOAD:mode>_<P:mptrsize>_store
20071 reload_<RELOAD:mode>_<P:mptrsize>_load
20073 which in turn calls this function, to do whatever is necessary to create
20074 valid addresses. */
20076 void
20077 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
20079 int regno = true_regnum (reg);
20080 machine_mode mode = GET_MODE (reg);
20081 addr_mask_type addr_mask;
20082 rtx addr;
20083 rtx new_addr;
20084 rtx op_reg, op0, op1;
20085 rtx and_op;
20086 rtx cc_clobber;
20087 rtvec rv;
20089 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
20090 || !base_reg_operand (scratch, GET_MODE (scratch)))
20091 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20093 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
20094 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
20096 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
20097 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
20099 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
20100 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
20102 else
20103 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20105 /* Make sure the mode is valid in this register class. */
20106 if ((addr_mask & RELOAD_REG_VALID) == 0)
20107 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20109 if (TARGET_DEBUG_ADDR)
20110 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
20112 new_addr = addr = XEXP (mem, 0);
20113 switch (GET_CODE (addr))
20115 /* Does the register class support auto update forms for this mode? If
20116 not, do the update now. We don't need a scratch register, since the
20117 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
20118 case PRE_INC:
20119 case PRE_DEC:
20120 op_reg = XEXP (addr, 0);
20121 if (!base_reg_operand (op_reg, Pmode))
20122 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20124 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
20126 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
20127 new_addr = op_reg;
20129 break;
20131 case PRE_MODIFY:
20132 op0 = XEXP (addr, 0);
20133 op1 = XEXP (addr, 1);
20134 if (!base_reg_operand (op0, Pmode)
20135 || GET_CODE (op1) != PLUS
20136 || !rtx_equal_p (op0, XEXP (op1, 0)))
20137 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20139 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
20141 emit_insn (gen_rtx_SET (op0, op1));
20142 new_addr = reg;
20144 break;
20146 /* Do we need to simulate AND -16 to clear the bottom address bits used
20147 in VMX load/stores? */
20148 case AND:
20149 op0 = XEXP (addr, 0);
20150 op1 = XEXP (addr, 1);
20151 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
20153 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
20154 op_reg = op0;
20156 else if (GET_CODE (op1) == PLUS)
20158 emit_insn (gen_rtx_SET (scratch, op1));
20159 op_reg = scratch;
20162 else
20163 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20165 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
20166 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
20167 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
20168 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
20169 new_addr = scratch;
20171 break;
20173 /* If this is an indirect address, make sure it is a base register. */
20174 case REG:
20175 case SUBREG:
20176 if (!base_reg_operand (addr, GET_MODE (addr)))
20178 emit_insn (gen_rtx_SET (scratch, addr));
20179 new_addr = scratch;
20181 break;
20183 /* If this is an indexed address, make sure the register class can handle
20184 indexed addresses for this mode. */
20185 case PLUS:
20186 op0 = XEXP (addr, 0);
20187 op1 = XEXP (addr, 1);
20188 if (!base_reg_operand (op0, Pmode))
20189 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20191 else if (int_reg_operand (op1, Pmode))
20193 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20195 emit_insn (gen_rtx_SET (scratch, addr));
20196 new_addr = scratch;
20200 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
20202 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
20203 || !quad_address_p (addr, mode, false))
20205 emit_insn (gen_rtx_SET (scratch, addr));
20206 new_addr = scratch;
20210 /* Make sure the register class can handle offset addresses. */
20211 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
20213 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20215 emit_insn (gen_rtx_SET (scratch, addr));
20216 new_addr = scratch;
20220 else
20221 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20223 break;
20225 case LO_SUM:
20226 op0 = XEXP (addr, 0);
20227 op1 = XEXP (addr, 1);
20228 if (!base_reg_operand (op0, Pmode))
20229 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20231 else if (int_reg_operand (op1, Pmode))
20233 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20235 emit_insn (gen_rtx_SET (scratch, addr));
20236 new_addr = scratch;
20240 /* Quad offsets are restricted and can't handle normal addresses. */
20241 else if (mode_supports_vsx_dform_quad (mode))
20243 emit_insn (gen_rtx_SET (scratch, addr));
20244 new_addr = scratch;
20247 /* Make sure the register class can handle offset addresses. */
20248 else if (legitimate_lo_sum_address_p (mode, addr, false))
20250 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20252 emit_insn (gen_rtx_SET (scratch, addr));
20253 new_addr = scratch;
20257 else
20258 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20260 break;
20262 case SYMBOL_REF:
20263 case CONST:
20264 case LABEL_REF:
20265 rs6000_emit_move (scratch, addr, Pmode);
20266 new_addr = scratch;
20267 break;
20269 default:
20270 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20273 /* Adjust the address if it changed. */
20274 if (addr != new_addr)
20276 mem = replace_equiv_address_nv (mem, new_addr);
20277 if (TARGET_DEBUG_ADDR)
20278 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20281 /* Now create the move. */
20282 if (store_p)
20283 emit_insn (gen_rtx_SET (mem, reg));
20284 else
20285 emit_insn (gen_rtx_SET (reg, mem));
20287 return;
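/* Editorial sketch of the PRE_INC fixup above, assuming a target whose
   vector loads lack auto-update: a V4SImode load from (pre_inc r9) is
   rewritten as
	addi r9,r9,16		<- gen_add2_insn does the update
	lvx  2,0,r9		<- the final move then sees a plain (reg)  */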
20290 /* Convert reloads involving 64-bit gprs and misaligned offset
20291 addressing, or multiple 32-bit gprs and offsets that are too large,
20292 to use indirect addressing. */
20294 void
20295 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20297 int regno = true_regnum (reg);
20298 enum reg_class rclass;
20299 rtx addr;
20300 rtx scratch_or_premodify = scratch;
20302 if (TARGET_DEBUG_ADDR)
20304 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20305 store_p ? "store" : "load");
20306 fprintf (stderr, "reg:\n");
20307 debug_rtx (reg);
20308 fprintf (stderr, "mem:\n");
20309 debug_rtx (mem);
20310 fprintf (stderr, "scratch:\n");
20311 debug_rtx (scratch);
20314 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
20315 gcc_assert (GET_CODE (mem) == MEM);
20316 rclass = REGNO_REG_CLASS (regno);
20317 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20318 addr = XEXP (mem, 0);
20320 if (GET_CODE (addr) == PRE_MODIFY)
20322 gcc_assert (REG_P (XEXP (addr, 0))
20323 && GET_CODE (XEXP (addr, 1)) == PLUS
20324 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20325 scratch_or_premodify = XEXP (addr, 0);
20326 if (!HARD_REGISTER_P (scratch_or_premodify))
20327 /* If we have a pseudo here then reload will have arranged
20328 to have it replaced, but only in the original insn.
20329 Use the replacement here too. */
20330 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
20332 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
20333 expressions from the original insn, without unsharing them.
20334 Any RTL that points into the original insn will of course
20335 have register replacements applied. That is why we don't
20336 need to look for replacements under the PLUS. */
20337 addr = XEXP (addr, 1);
20339 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20341 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20343 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20345 /* Now create the move. */
20346 if (store_p)
20347 emit_insn (gen_rtx_SET (mem, reg));
20348 else
20349 emit_insn (gen_rtx_SET (reg, mem));
20351 return;
20354 /* Given an rtx X being reloaded into a reg required to be
20355 in class CLASS, return the class of reg to actually use.
20356 In general this is just CLASS; but on some machines
20357 in some cases it is preferable to use a more restrictive class.
20359 On the RS/6000, we have to return NO_REGS when we want to reload a
20360 floating-point CONST_DOUBLE to force it to be copied to memory.
20362 We also don't want to reload integer values into floating-point
20363 registers if we can at all help it. In fact, this can
20364 cause reload to die, if it tries to generate a reload of CTR
20365 into a FP register and discovers it doesn't have the memory location
20366 required.
20368 ??? Would it be a good idea to have reload do the converse, that is
20369 try to reload floating modes into FP registers if possible? */
20372 static enum reg_class
20373 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20375 machine_mode mode = GET_MODE (x);
20376 bool is_constant = CONSTANT_P (x);
20378 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20379 reload class for it. */
20380 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20381 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20382 return NO_REGS;
20384 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20385 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20386 return NO_REGS;
20388 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20389 the reloading of address expressions using PLUS into floating point
20390 registers. */
20391 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20393 if (is_constant)
20395 /* Zero is always allowed in all VSX registers. */
20396 if (x == CONST0_RTX (mode))
20397 return rclass;
20399 /* If this is a vector constant that can be formed with a few Altivec
20400 instructions, we want altivec registers. */
20401 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20402 return ALTIVEC_REGS;
20404 /* If this is an integer constant that can easily be loaded into
20405 vector registers, allow it. */
20406 if (CONST_INT_P (x))
20408 HOST_WIDE_INT value = INTVAL (x);
20410 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20411 2.06 can generate it in the Altivec registers with
20412 VSPLTI<x>. */
20413 if (value == -1)
20415 if (TARGET_P8_VECTOR)
20416 return rclass;
20417 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20418 return ALTIVEC_REGS;
20419 else
20420 return NO_REGS;
20423 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20424 a sign extend in the Altivec registers. */
20425 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20426 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20427 return ALTIVEC_REGS;
20430 /* Force constant to memory. */
20431 return NO_REGS;
20434 /* D-form addressing can easily reload the value. */
20435 if (mode_supports_vmx_dform (mode)
20436 || mode_supports_vsx_dform_quad (mode))
20437 return rclass;
20439 /* If this is a scalar floating point value and we don't have D-form
20440 addressing, prefer the traditional floating point registers so that we
20441 can use D-form (register+offset) addressing. */
20442 if (rclass == VSX_REGS
20443 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20444 return FLOAT_REGS;
20446 /* Prefer the Altivec registers if Altivec is handling the vector
20447 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20448 loads. */
20449 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20450 || mode == V1TImode)
20451 return ALTIVEC_REGS;
20453 return rclass;
20456 if (is_constant || GET_CODE (x) == PLUS)
20458 if (reg_class_subset_p (GENERAL_REGS, rclass))
20459 return GENERAL_REGS;
20460 if (reg_class_subset_p (BASE_REGS, rclass))
20461 return BASE_REGS;
20462 return NO_REGS;
20465 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20466 return GENERAL_REGS;
20468 return rclass;
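/* Illustrative only (not from the original source): asking for the
   preferred class of constant -1 in VSX_REGS keeps VSX_REGS on ISA 2.07
   (XXLORC can form -1 anywhere) but narrows to ALTIVEC_REGS on ISA
   2.06, where only VSPLTI<x> can synthesize it.  */
#if 0
/* Hypothetical example function, named for illustration only.  */
static enum reg_class
example_preferred_class_for_minus_one (void)
{
  return rs6000_preferred_reload_class (constm1_rtx, VSX_REGS);
}
#endif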
20471 /* Debug version of rs6000_preferred_reload_class. */
20472 static enum reg_class
20473 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20475 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20477 fprintf (stderr,
20478 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20479 "mode = %s, x:\n",
20480 reg_class_names[ret], reg_class_names[rclass],
20481 GET_MODE_NAME (GET_MODE (x)));
20482 debug_rtx (x);
20484 return ret;
20487 /* If we are copying between FP or AltiVec registers and anything else, we need
20488 a memory location. The exception is when we are targeting ppc64 and the
20489 direct moves between FPRs and GPRs are available. Also, under VSX, you
20490 can copy vector registers from the FP register set to the Altivec register
20491 set and vice versa. */
20493 static bool
20494 rs6000_secondary_memory_needed (enum reg_class from_class,
20495 enum reg_class to_class,
20496 machine_mode mode)
20498 enum rs6000_reg_type from_type, to_type;
20499 bool altivec_p = ((from_class == ALTIVEC_REGS)
20500 || (to_class == ALTIVEC_REGS));
20502 /* If a simple/direct move is available, we don't need secondary memory. */
20503 from_type = reg_class_to_reg_type[(int)from_class];
20504 to_type = reg_class_to_reg_type[(int)to_class];
20506 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20507 (secondary_reload_info *)0, altivec_p))
20508 return false;
20510 /* If we have a floating point or vector register class, we need to use
20511 memory to transfer the data. */
20512 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20513 return true;
20515 return false;
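/* For instance (editorial note): a DImode copy between FLOAT_REGS and
   GENERAL_REGS has no move instruction on older targets, so this hook
   returns true and the copy goes through a stack slot; on a 64-bit
   ISA 2.07 target the same copy is a simple mtvsrd/mfvsrd and the hook
   returns false.  */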
20518 /* Debug version of rs6000_secondary_memory_needed. */
20519 static bool
20520 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
20521 enum reg_class to_class,
20522 machine_mode mode)
20524 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
20526 fprintf (stderr,
20527 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20528 "to_class = %s, mode = %s\n",
20529 ret ? "true" : "false",
20530 reg_class_names[from_class],
20531 reg_class_names[to_class],
20532 GET_MODE_NAME (mode));
20534 return ret;
20537 /* Return the register class of a scratch register needed to copy IN into
20538 or out of a register in RCLASS in MODE. If it can be done directly,
20539 NO_REGS is returned. */
20541 static enum reg_class
20542 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20543 rtx in)
20545 int regno;
20547 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20548 #if TARGET_MACHO
20549 && MACHOPIC_INDIRECT
20550 #endif
20553 /* We cannot copy a symbolic operand directly into anything
20554 other than BASE_REGS for TARGET_ELF. So indicate that a
20555 register from BASE_REGS is needed as an intermediate
20556 register.
20558 On Darwin, pic addresses require a load from memory, which
20559 needs a base register. */
20560 if (rclass != BASE_REGS
20561 && (GET_CODE (in) == SYMBOL_REF
20562 || GET_CODE (in) == HIGH
20563 || GET_CODE (in) == LABEL_REF
20564 || GET_CODE (in) == CONST))
20565 return BASE_REGS;
20568 if (GET_CODE (in) == REG)
20570 regno = REGNO (in);
20571 if (regno >= FIRST_PSEUDO_REGISTER)
20573 regno = true_regnum (in);
20574 if (regno >= FIRST_PSEUDO_REGISTER)
20575 regno = -1;
20578 else if (GET_CODE (in) == SUBREG)
20580 regno = true_regnum (in);
20581 if (regno >= FIRST_PSEUDO_REGISTER)
20582 regno = -1;
20584 else
20585 regno = -1;
20587 /* If we have VSX register moves, prefer moving scalar values between
20588 Altivec registers and GPR by going via an FPR (and then via memory)
20589 instead of reloading the secondary memory address for Altivec moves. */
20590 if (TARGET_VSX
20591 && GET_MODE_SIZE (mode) < 16
20592 && !mode_supports_vmx_dform (mode)
20593 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20594 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20595 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20596 && (regno >= 0 && INT_REGNO_P (regno)))))
20597 return FLOAT_REGS;
20599 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20600 into anything. */
20601 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20602 || (regno >= 0 && INT_REGNO_P (regno)))
20603 return NO_REGS;
20605 /* Constants, memory, and VSX registers can go into VSX registers (both the
20606 traditional floating point and the altivec registers). */
20607 if (rclass == VSX_REGS
20608 && (regno == -1 || VSX_REGNO_P (regno)))
20609 return NO_REGS;
20611 /* Constants, memory, and FP registers can go into FP registers. */
20612 if ((regno == -1 || FP_REGNO_P (regno))
20613 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20614 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20616 /* Memory, and AltiVec registers can go into AltiVec registers. */
20617 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20618 && rclass == ALTIVEC_REGS)
20619 return NO_REGS;
20621 /* We can copy among the CR registers. */
20622 if ((rclass == CR_REGS || rclass == CR0_REGS)
20623 && regno >= 0 && CR_REGNO_P (regno))
20624 return NO_REGS;
20626 /* Otherwise, we need GENERAL_REGS. */
20627 return GENERAL_REGS;
20630 /* Debug version of rs6000_secondary_reload_class. */
20631 static enum reg_class
20632 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20633 machine_mode mode, rtx in)
20635 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20636 fprintf (stderr,
20637 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20638 "mode = %s, input rtx:\n",
20639 reg_class_names[ret], reg_class_names[rclass],
20640 GET_MODE_NAME (mode));
20641 debug_rtx (in);
20643 return ret;
20646 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
20648 static bool
20649 rs6000_cannot_change_mode_class (machine_mode from,
20650 machine_mode to,
20651 enum reg_class rclass)
20653 unsigned from_size = GET_MODE_SIZE (from);
20654 unsigned to_size = GET_MODE_SIZE (to);
20656 if (from_size != to_size)
20658 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20660 if (reg_classes_intersect_p (xclass, rclass))
20662 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20663 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20664 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20665 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20667 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20668 single register under VSX because the scalar part of the register
20669 is in the upper 64-bits, and not the lower 64-bits. Types like
20670 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20671 IEEE floating point can't overlap, and neither can small
20672 values. */
20674 if (to_float128_vector_p && from_float128_vector_p)
20675 return false;
20677 else if (to_float128_vector_p || from_float128_vector_p)
20678 return true;
20680 /* TDmode in floating-point registers must always go into a register
20681 pair with the most significant word in the even-numbered register
20682 to match ISA requirements. In little-endian mode, this does not
20683 match subreg numbering, so we cannot allow subregs. */
20684 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20685 return true;
20687 if (from_size < 8 || to_size < 8)
20688 return true;
20690 if (from_size == 8 && (8 * to_nregs) != to_size)
20691 return true;
20693 if (to_size == 8 && (8 * from_nregs) != from_size)
20694 return true;
20696 return false;
20698 else
20699 return false;
20702 /* Since the VSX register set includes traditional floating point registers
20703 and altivec registers, just check for the size being different instead of
20704 trying to check whether the modes are vector modes. Otherwise it won't
20705 allow say DF and DI to change classes. For types like TFmode and TDmode
20706 that take 2 64-bit registers, rather than a single 128-bit register, don't
20707 allow subregs of those types to other 128 bit types. */
20708 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20710 unsigned num_regs = (from_size + 15) / 16;
20711 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20712 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20713 return true;
20715 return (from_size != 8 && from_size != 16);
20718 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20719 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20720 return true;
20722 return false;
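/* Editorial examples of the rules above: (subreg:DI (reg:DF ...)) is
   fine in VSX classes, both modes being 8 bytes wide, while taking a
   DImode subreg of a TDmode value is rejected on little-endian because
   TDmode pairs must keep their most significant word in the even
   register.  */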
20725 /* Debug version of rs6000_cannot_change_mode_class. */
20726 static bool
20727 rs6000_debug_cannot_change_mode_class (machine_mode from,
20728 machine_mode to,
20729 enum reg_class rclass)
20731 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
20733 fprintf (stderr,
20734 "rs6000_cannot_change_mode_class, return %s, from = %s, "
20735 "to = %s, rclass = %s\n",
20736 ret ? "true" : "false",
20737 GET_MODE_NAME (from), GET_MODE_NAME (to),
20738 reg_class_names[rclass]);
20740 return ret;
20743 /* Return a string to do a move operation of 128 bits of data. */
20745 const char *
20746 rs6000_output_move_128bit (rtx operands[])
20748 rtx dest = operands[0];
20749 rtx src = operands[1];
20750 machine_mode mode = GET_MODE (dest);
20751 int dest_regno;
20752 int src_regno;
20753 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20754 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20756 if (REG_P (dest))
20758 dest_regno = REGNO (dest);
20759 dest_gpr_p = INT_REGNO_P (dest_regno);
20760 dest_fp_p = FP_REGNO_P (dest_regno);
20761 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20762 dest_vsx_p = dest_fp_p | dest_vmx_p;
20764 else
20766 dest_regno = -1;
20767 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20770 if (REG_P (src))
20772 src_regno = REGNO (src);
20773 src_gpr_p = INT_REGNO_P (src_regno);
20774 src_fp_p = FP_REGNO_P (src_regno);
20775 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20776 src_vsx_p = src_fp_p | src_vmx_p;
20778 else
20780 src_regno = -1;
20781 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20784 /* Register moves. */
20785 if (dest_regno >= 0 && src_regno >= 0)
20787 if (dest_gpr_p)
20789 if (src_gpr_p)
20790 return "#";
20792 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20793 return (WORDS_BIG_ENDIAN
20794 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20795 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20797 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20798 return "#";
20801 else if (TARGET_VSX && dest_vsx_p)
20803 if (src_vsx_p)
20804 return "xxlor %x0,%x1,%x1";
20806 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20807 return (WORDS_BIG_ENDIAN
20808 ? "mtvsrdd %x0,%1,%L1"
20809 : "mtvsrdd %x0,%L1,%1");
20811 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20812 return "#";
20815 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20816 return "vor %0,%1,%1";
20818 else if (dest_fp_p && src_fp_p)
20819 return "#";
20822 /* Loads. */
20823 else if (dest_regno >= 0 && MEM_P (src))
20825 if (dest_gpr_p)
20827 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20828 return "lq %0,%1";
20829 else
20830 return "#";
20833 else if (TARGET_ALTIVEC && dest_vmx_p
20834 && altivec_indexed_or_indirect_operand (src, mode))
20835 return "lvx %0,%y1";
20837 else if (TARGET_VSX && dest_vsx_p)
20839 if (mode_supports_vsx_dform_quad (mode)
20840 && quad_address_p (XEXP (src, 0), mode, true))
20841 return "lxv %x0,%1";
20843 else if (TARGET_P9_VECTOR)
20844 return "lxvx %x0,%y1";
20846 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20847 return "lxvw4x %x0,%y1";
20849 else
20850 return "lxvd2x %x0,%y1";
20853 else if (TARGET_ALTIVEC && dest_vmx_p)
20854 return "lvx %0,%y1";
20856 else if (dest_fp_p)
20857 return "#";
20860 /* Stores. */
20861 else if (src_regno >= 0 && MEM_P (dest))
20863 if (src_gpr_p)
20865 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20866 return "stq %1,%0";
20867 else
20868 return "#";
20871 else if (TARGET_ALTIVEC && src_vmx_p
20872 && altivec_indexed_or_indirect_operand (dest, mode))
20873 return "stvx %1,%y0";
20875 else if (TARGET_VSX && src_vsx_p)
20877 if (mode_supports_vsx_dform_quad (mode)
20878 && quad_address_p (XEXP (dest, 0), mode, true))
20879 return "stxv %x1,%0";
20881 else if (TARGET_P9_VECTOR)
20882 return "stxvx %x1,%y0";
20884 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20885 return "stxvw4x %x1,%y0";
20887 else
20888 return "stxvd2x %x1,%y0";
20891 else if (TARGET_ALTIVEC && src_vmx_p)
20892 return "stvx %1,%y0";
20894 else if (src_fp_p)
20895 return "#";
20898 /* Constants. */
20899 else if (dest_regno >= 0
20900 && (GET_CODE (src) == CONST_INT
20901 || GET_CODE (src) == CONST_WIDE_INT
20902 || GET_CODE (src) == CONST_DOUBLE
20903 || GET_CODE (src) == CONST_VECTOR))
20905 if (dest_gpr_p)
20906 return "#";
20908 else if ((dest_vmx_p && TARGET_ALTIVEC)
20909 || (dest_vsx_p && TARGET_VSX))
20910 return output_vec_const_move (operands);
20913 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20916 /* Validate a 128-bit move. */
20917 bool
20918 rs6000_move_128bit_ok_p (rtx operands[])
20920 machine_mode mode = GET_MODE (operands[0]);
20921 return (gpc_reg_operand (operands[0], mode)
20922 || gpc_reg_operand (operands[1], mode));
20925 /* Return true if a 128-bit move needs to be split. */
20926 bool
20927 rs6000_split_128bit_ok_p (rtx operands[])
20929 if (!reload_completed)
20930 return false;
20932 if (!gpr_or_gpr_p (operands[0], operands[1]))
20933 return false;
20935 if (quad_load_store_p (operands[0], operands[1]))
20936 return false;
20938 return true;
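/* E.g. (editorial): after reload, a TImode GPR-to-GPR copy that cannot
   use power8 lq/stq comes back as "#" from rs6000_output_move_128bit
   and is split here into word-sized moves.  */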
20942 /* Given a comparison operation, return the bit number in CCR to test. We
20943 know this is a valid comparison.
20945 SCC_P is 1 if this is for an scc. That means that %D will have been
20946 used instead of %C, so the bits will be in different places.
20948 Return -1 if OP isn't a valid comparison for some reason. */
20950 int
20951 ccr_bit (rtx op, int scc_p)
20953 enum rtx_code code = GET_CODE (op);
20954 machine_mode cc_mode;
20955 int cc_regnum;
20956 int base_bit;
20957 rtx reg;
20959 if (!COMPARISON_P (op))
20960 return -1;
20962 reg = XEXP (op, 0);
20964 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20966 cc_mode = GET_MODE (reg);
20967 cc_regnum = REGNO (reg);
20968 base_bit = 4 * (cc_regnum - CR0_REGNO);
20970 validate_condition_mode (code, cc_mode);
20972 /* When generating a sCOND operation, only positive conditions are
20973 allowed. */
20974 gcc_assert (!scc_p
20975 || code == EQ || code == GT || code == LT || code == UNORDERED
20976 || code == GTU || code == LTU);
20978 switch (code)
20980 case NE:
20981 return scc_p ? base_bit + 3 : base_bit + 2;
20982 case EQ:
20983 return base_bit + 2;
20984 case GT: case GTU: case UNLE:
20985 return base_bit + 1;
20986 case LT: case LTU: case UNGE:
20987 return base_bit;
20988 case ORDERED: case UNORDERED:
20989 return base_bit + 3;
20991 case GE: case GEU:
20992 /* If scc, we will have done a cror to put the bit in the
20993 unordered position. So test that bit. For integer, this is ! LT
20994 unless this is an scc insn. */
20995 return scc_p ? base_bit + 3 : base_bit;
20997 case LE: case LEU:
20998 return scc_p ? base_bit + 3 : base_bit + 1;
21000 default:
21001 gcc_unreachable ();
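/* Worked example (editorial): for (eq (reg:CC cr2) (const_int 0)),
   base_bit is 4 * 2 = 8 and EQ tests base_bit + 2, so ccr_bit returns
   10 regardless of SCC_P.  */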
21005 /* Return the GOT register. */
21007 rtx
21008 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
21010 /* The second flow pass currently (June 1999) can't update
21011 regs_ever_live without disturbing other parts of the compiler, so
21012 update it here to make the prolog/epilogue code happy. */
21013 if (!can_create_pseudo_p ()
21014 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
21015 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
21017 crtl->uses_pic_offset_table = 1;
21019 return pic_offset_table_rtx;
21022 static rs6000_stack_t stack_info;
21024 /* Function to init struct machine_function.
21025 This will be called, via a pointer variable,
21026 from push_function_context. */
21028 static struct machine_function *
21029 rs6000_init_machine_status (void)
21031 stack_info.reload_completed = 0;
21032 return ggc_cleared_alloc<machine_function> ();
21035 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
21037 /* Write out a function code label. */
21039 void
21040 rs6000_output_function_entry (FILE *file, const char *fname)
21042 if (fname[0] != '.')
21044 switch (DEFAULT_ABI)
21046 default:
21047 gcc_unreachable ();
21049 case ABI_AIX:
21050 if (DOT_SYMBOLS)
21051 putc ('.', file);
21052 else
21053 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
21054 break;
21056 case ABI_ELFv2:
21057 case ABI_V4:
21058 case ABI_DARWIN:
21059 break;
21063 RS6000_OUTPUT_BASENAME (file, fname);
21066 /* Print an operand. Recognize special options, documented below. */
21068 #if TARGET_ELF
21069 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
21070 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
21071 #else
21072 #define SMALL_DATA_RELOC "sda21"
21073 #define SMALL_DATA_REG 0
21074 #endif
21076 void
21077 print_operand (FILE *file, rtx x, int code)
21079 int i;
21080 unsigned HOST_WIDE_INT uval;
21082 switch (code)
21084 /* %a is output_address. */
21086 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
21087 output_operand. */
21089 case 'D':
21090 /* Like 'J' but get to the GT bit only. */
21091 gcc_assert (REG_P (x));
21093 /* Bit 1 is GT bit. */
21094 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
21096 /* Add one for shift count in rlinm for scc. */
21097 fprintf (file, "%d", i + 1);
21098 return;
21100 case 'e':
21101 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
21102 if (! INT_P (x))
21104 output_operand_lossage ("invalid %%e value");
21105 return;
21108 uval = INTVAL (x);
21109 if ((uval & 0xffff) == 0 && uval != 0)
21110 putc ('s', file);
21111 return;
21113 case 'E':
21114 /* X is a CR register. Print the number of the EQ bit of the CR. */
21115 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21116 output_operand_lossage ("invalid %%E value");
21117 else
21118 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
21119 return;
21121 case 'f':
21122 /* X is a CR register. Print the shift count needed to move it
21123 to the high-order four bits. */
21124 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21125 output_operand_lossage ("invalid %%f value");
21126 else
21127 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
21128 return;
21130 case 'F':
21131 /* Similar, but print the count for the rotate in the opposite
21132 direction. */
21133 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21134 output_operand_lossage ("invalid %%F value");
21135 else
21136 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
21137 return;
21139 case 'G':
21140 /* X is a constant integer. If it is negative, print "m",
21141 otherwise print "z". This is to make an aze or ame insn. */
21142 if (GET_CODE (x) != CONST_INT)
21143 output_operand_lossage ("invalid %%G value");
21144 else if (INTVAL (x) >= 0)
21145 putc ('z', file);
21146 else
21147 putc ('m', file);
21148 return;
21150 case 'h':
21151 /* If constant, output low-order five bits. Otherwise, write
21152 normally. */
21153 if (INT_P (x))
21154 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
21155 else
21156 print_operand (file, x, 0);
21157 return;
21159 case 'H':
21160 /* If constant, output low-order six bits. Otherwise, write
21161 normally. */
21162 if (INT_P (x))
21163 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
21164 else
21165 print_operand (file, x, 0);
21166 return;
21168 case 'I':
21169 /* Print `i' if this is a constant, else nothing. */
21170 if (INT_P (x))
21171 putc ('i', file);
21172 return;
21174 case 'j':
21175 /* Write the bit number in CCR for jump. */
21176 i = ccr_bit (x, 0);
21177 if (i == -1)
21178 output_operand_lossage ("invalid %%j code");
21179 else
21180 fprintf (file, "%d", i);
21181 return;
21183 case 'J':
21184 /* Similar, but add one for shift count in rlinm for scc and pass
21185 scc flag to `ccr_bit'. */
21186 i = ccr_bit (x, 1);
21187 if (i == -1)
21188 output_operand_lossage ("invalid %%J code");
21189 else
21190 /* If we want bit 31, write a shift count of zero, not 32. */
21191 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21192 return;
21194 case 'k':
21195 /* X must be a constant. Write the 1's complement of the
21196 constant. */
21197 if (! INT_P (x))
21198 output_operand_lossage ("invalid %%k value");
21199 else
21200 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
21201 return;
21203 case 'K':
21204 /* X must be a symbolic constant on ELF. Write an
21205 expression suitable for an 'addi' that adds in the low 16
21206 bits of the MEM. */
21207 if (GET_CODE (x) == CONST)
21209 if (GET_CODE (XEXP (x, 0)) != PLUS
21210 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
21211 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
21212 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
21213 output_operand_lossage ("invalid %%K value");
21215 print_operand_address (file, x);
21216 fputs ("@l", file);
21217 return;
21219 /* %l is output_asm_label. */
21221 case 'L':
21222 /* Write second word of DImode or DFmode reference. Works on register
21223 or non-indexed memory only. */
21224 if (REG_P (x))
21225 fputs (reg_names[REGNO (x) + 1], file);
21226 else if (MEM_P (x))
21228 machine_mode mode = GET_MODE (x);
21229 /* Handle possible auto-increment. Since it is pre-increment and
21230 we have already done it, we can just use an offset of word. */
21231 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21232 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21233 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21234 UNITS_PER_WORD));
21235 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21236 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21237 UNITS_PER_WORD));
21238 else
21239 output_address (mode, XEXP (adjust_address_nv (x, SImode,
21240 UNITS_PER_WORD),
21241 0));
21243 if (small_data_operand (x, GET_MODE (x)))
21244 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21245 reg_names[SMALL_DATA_REG]);
21247 return;
21249 case 'N':
21250 /* Write the number of elements in the vector times 4. */
21251 if (GET_CODE (x) != PARALLEL)
21252 output_operand_lossage ("invalid %%N value");
21253 else
21254 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21255 return;
21257 case 'O':
21258 /* Similar, but subtract 1 first. */
21259 if (GET_CODE (x) != PARALLEL)
21260 output_operand_lossage ("invalid %%O value");
21261 else
21262 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21263 return;
21265 case 'p':
21266 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21267 if (! INT_P (x)
21268 || INTVAL (x) < 0
21269 || (i = exact_log2 (INTVAL (x))) < 0)
21270 output_operand_lossage ("invalid %%p value");
21271 else
21272 fprintf (file, "%d", i);
21273 return;
21275 case 'P':
21276 /* The operand must be an indirect memory reference. The result
21277 is the register name. */
21278 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
21279 || REGNO (XEXP (x, 0)) >= 32)
21280 output_operand_lossage ("invalid %%P value");
21281 else
21282 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21283 return;
21285 case 'q':
21286 /* This outputs the logical code corresponding to a boolean
21287 expression. The expression may have one or both operands
21288 negated (if one, only the first one). For condition register
21289 logical operations, it will also treat the negated
21290 CR codes as NOTs, but not handle NOTs of them. */
21292 const char *const *t = 0;
21293 const char *s;
21294 enum rtx_code code = GET_CODE (x);
21295 static const char * const tbl[3][3] = {
21296 { "and", "andc", "nor" },
21297 { "or", "orc", "nand" },
21298 { "xor", "eqv", "xor" } };
21300 if (code == AND)
21301 t = tbl[0];
21302 else if (code == IOR)
21303 t = tbl[1];
21304 else if (code == XOR)
21305 t = tbl[2];
21306 else
21307 output_operand_lossage ("invalid %%q value");
21309 if (GET_CODE (XEXP (x, 0)) != NOT)
21310 s = t[0];
21311 else
21313 if (GET_CODE (XEXP (x, 1)) == NOT)
21314 s = t[2];
21315 else
21316 s = t[1];
21319 fputs (s, file);
21321 return;
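/* Editorial examples for the table above: (ior (not x) y) picks
   tbl[1][1] and prints "orc", while (xor (not x) (not y)) picks
   tbl[2][2] and prints "xor", the two negations cancelling.  */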
21323 case 'Q':
21324 if (! TARGET_MFCRF)
21325 return;
21326 fputc (',', file);
21327 /* FALLTHRU */
21329 case 'R':
21330 /* X is a CR register. Print the mask for `mtcrf'. */
21331 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
21332 output_operand_lossage ("invalid %%R value");
21333 else
21334 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21335 return;
21337 case 's':
21338 /* Low 5 bits of 32 - value. */
21339 if (! INT_P (x))
21340 output_operand_lossage ("invalid %%s value");
21341 else
21342 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21343 return;
21345 case 't':
21346 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21347 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
21349 /* Bit 3 is OV bit. */
21350 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21352 /* If we want bit 31, write a shift count of zero, not 32. */
21353 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21354 return;
21356 case 'T':
21357 /* Print the symbolic name of a branch target register. */
21358 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21359 && REGNO (x) != CTR_REGNO))
21360 output_operand_lossage ("invalid %%T value");
21361 else if (REGNO (x) == LR_REGNO)
21362 fputs ("lr", file);
21363 else
21364 fputs ("ctr", file);
21365 return;
21367 case 'u':
21368 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21369 for use in unsigned operand. */
21370 if (! INT_P (x))
21372 output_operand_lossage ("invalid %%u value");
21373 return;
21376 uval = INTVAL (x);
21377 if ((uval & 0xffff) == 0)
21378 uval >>= 16;
21380 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21381 return;
21383 case 'v':
21384 /* High-order 16 bits of constant for use in signed operand. */
21385 if (! INT_P (x))
21386 output_operand_lossage ("invalid %%v value");
21387 else
21388 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21389 (INTVAL (x) >> 16) & 0xffff);
21390 return;
21392 case 'U':
21393 /* Print `u' if this has an auto-increment or auto-decrement. */
21394 if (MEM_P (x)
21395 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21396 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21397 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21398 putc ('u', file);
21399 return;
21401 case 'V':
21402 /* Print the trap code for this operand. */
21403 switch (GET_CODE (x))
21405 case EQ:
21406 fputs ("eq", file); /* 4 */
21407 break;
21408 case NE:
21409 fputs ("ne", file); /* 24 */
21410 break;
21411 case LT:
21412 fputs ("lt", file); /* 16 */
21413 break;
21414 case LE:
21415 fputs ("le", file); /* 20 */
21416 break;
21417 case GT:
21418 fputs ("gt", file); /* 8 */
21419 break;
21420 case GE:
21421 fputs ("ge", file); /* 12 */
21422 break;
21423 case LTU:
21424 fputs ("llt", file); /* 2 */
21425 break;
21426 case LEU:
21427 fputs ("lle", file); /* 6 */
21428 break;
21429 case GTU:
21430 fputs ("lgt", file); /* 1 */
21431 break;
21432 case GEU:
21433 fputs ("lge", file); /* 5 */
21434 break;
21435 default:
21436 gcc_unreachable ();
21438 break;
21440 case 'w':
21441 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21442 normally. */
21443 if (INT_P (x))
21444 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21445 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21446 else
21447 print_operand (file, x, 0);
21448 return;
21450 case 'x':
21451 /* X is a FPR or Altivec register used in a VSX context. */
21452 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21453 output_operand_lossage ("invalid %%x value");
21454 else
21456 int reg = REGNO (x);
21457 int vsx_reg = (FP_REGNO_P (reg)
21458 ? reg - 32
21459 : reg - FIRST_ALTIVEC_REGNO + 32);
21461 #ifdef TARGET_REGNAMES
21462 if (TARGET_REGNAMES)
21463 fprintf (file, "%%vs%d", vsx_reg);
21464 else
21465 #endif
21466 fprintf (file, "%d", vsx_reg);
21468 return;
21470 case 'X':
21471 if (MEM_P (x)
21472 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21473 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21474 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21475 putc ('x', file);
21476 return;
21478 case 'Y':
21479 /* Like 'L', for third word of TImode/PTImode. */
21480 if (REG_P (x))
21481 fputs (reg_names[REGNO (x) + 2], file);
21482 else if (MEM_P (x))
21484 machine_mode mode = GET_MODE (x);
21485 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21486 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21487 output_address (mode, plus_constant (Pmode,
21488 XEXP (XEXP (x, 0), 0), 8));
21489 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21490 output_address (mode, plus_constant (Pmode,
21491 XEXP (XEXP (x, 0), 0), 8));
21492 else
21493 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21494 if (small_data_operand (x, GET_MODE (x)))
21495 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21496 reg_names[SMALL_DATA_REG]);
21498 return;
21500 case 'z':
21501 /* X is a SYMBOL_REF. Write out the name preceded by a
21502 period and without any trailing data in brackets. Used for function
21503 names. If we are configured for System V (or the embedded ABI) on
21504 the PowerPC, do not emit the period, since those systems do not use
21505 TOCs and the like. */
21506 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21508 /* For macho, check to see if we need a stub. */
21509 if (TARGET_MACHO)
21511 const char *name = XSTR (x, 0);
21512 #if TARGET_MACHO
21513 if (darwin_emit_branch_islands
21514 && MACHOPIC_INDIRECT
21515 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21516 name = machopic_indirection_name (x, /*stub_p=*/true);
21517 #endif
21518 assemble_name (file, name);
21520 else if (!DOT_SYMBOLS)
21521 assemble_name (file, XSTR (x, 0));
21522 else
21523 rs6000_output_function_entry (file, XSTR (x, 0));
21524 return;
21526 case 'Z':
21527 /* Like 'L', for last word of TImode/PTImode. */
21528 if (REG_P (x))
21529 fputs (reg_names[REGNO (x) + 3], file);
21530 else if (MEM_P (x))
21532 machine_mode mode = GET_MODE (x);
21533 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21534 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21535 output_address (mode, plus_constant (Pmode,
21536 XEXP (XEXP (x, 0), 0), 12));
21537 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21538 output_address (mode, plus_constant (Pmode,
21539 XEXP (XEXP (x, 0), 0), 12));
21540 else
21541 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21542 if (small_data_operand (x, GET_MODE (x)))
21543 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21544 reg_names[SMALL_DATA_REG]);
21546 return;
21548 /* Print AltiVec memory operand. */
21549 case 'y':
21551 rtx tmp;
21553 gcc_assert (MEM_P (x));
21555 tmp = XEXP (x, 0);
21557 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
21558 && GET_CODE (tmp) == AND
21559 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21560 && INTVAL (XEXP (tmp, 1)) == -16)
21561 tmp = XEXP (tmp, 0);
21562 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21563 && GET_CODE (tmp) == PRE_MODIFY)
21564 tmp = XEXP (tmp, 1);
21565 if (REG_P (tmp))
21566 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21567 else
21569 if (GET_CODE (tmp) != PLUS
21570 || !REG_P (XEXP (tmp, 0))
21571 || !REG_P (XEXP (tmp, 1)))
21573 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21574 break;
21577 if (REGNO (XEXP (tmp, 0)) == 0)
21578 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21579 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21580 else
21581 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21582 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21584 break;
21587 case 0:
21588 if (REG_P (x))
21589 fprintf (file, "%s", reg_names[REGNO (x)]);
21590 else if (MEM_P (x))
21592 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21593 know the width from the mode. */
21594 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21595 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21596 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21597 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21598 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21599 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21600 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21601 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21602 else
21603 output_address (GET_MODE (x), XEXP (x, 0));
21605 else
21607 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21608 /* This hack along with a corresponding hack in
21609 rs6000_output_addr_const_extra arranges to output addends
21610 where the assembler expects to find them. eg.
21611 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21612 without this hack would be output as "x@toc+4". We
21613 want "x+4@toc". */
21614 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21615 else
21616 output_addr_const (file, x);
21618 return;
21620 case '&':
21621 if (const char *name = get_some_local_dynamic_name ())
21622 assemble_name (file, name);
21623 else
21624 output_operand_lossage ("'%%&' used without any "
21625 "local dynamic TLS references");
21626 return;
21628 default:
21629 output_operand_lossage ("invalid %%xn code");
21633 /* Print the address of an operand. */
21635 void
21636 print_operand_address (FILE *file, rtx x)
21638 if (REG_P (x))
21639 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21640 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21641 || GET_CODE (x) == LABEL_REF)
21643 output_addr_const (file, x);
21644 if (small_data_operand (x, GET_MODE (x)))
21645 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21646 reg_names[SMALL_DATA_REG]);
21647 else
21648 gcc_assert (!TARGET_TOC);
21650 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21651 && REG_P (XEXP (x, 1)))
21653 if (REGNO (XEXP (x, 0)) == 0)
21654 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21655 reg_names[ REGNO (XEXP (x, 0)) ]);
21656 else
21657 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21658 reg_names[ REGNO (XEXP (x, 1)) ]);
21660 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21661 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21662 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21663 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21664 #if TARGET_MACHO
21665 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21666 && CONSTANT_P (XEXP (x, 1)))
21668 fprintf (file, "lo16(");
21669 output_addr_const (file, XEXP (x, 1));
21670 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21672 #endif
21673 #if TARGET_ELF
21674 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21675 && CONSTANT_P (XEXP (x, 1)))
21677 output_addr_const (file, XEXP (x, 1));
21678 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21680 #endif
21681 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21683 /* This hack along with a corresponding hack in
21684 rs6000_output_addr_const_extra arranges to output addends
21685 where the assembler expects to find them. eg.
21686 (lo_sum (reg 9)
21687 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21688 without this hack would be output as "x@toc+8@l(9)". We
21689 want "x+8@toc@l(9)". */
21690 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21691 if (GET_CODE (x) == LO_SUM)
21692 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21693 else
21694 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21696 else
21697 gcc_unreachable ();
21700 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21702 static bool
21703 rs6000_output_addr_const_extra (FILE *file, rtx x)
21705 if (GET_CODE (x) == UNSPEC)
21706 switch (XINT (x, 1))
21708 case UNSPEC_TOCREL:
21709 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21710 && REG_P (XVECEXP (x, 0, 1))
21711 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21712 output_addr_const (file, XVECEXP (x, 0, 0));
21713 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21715 if (INTVAL (tocrel_offset_oac) >= 0)
21716 fprintf (file, "+");
21717 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21719 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21721 putc ('-', file);
21722 assemble_name (file, toc_label_name);
21723 need_toc_init = 1;
21725 else if (TARGET_ELF)
21726 fputs ("@toc", file);
21727 return true;
21729 #if TARGET_MACHO
21730 case UNSPEC_MACHOPIC_OFFSET:
21731 output_addr_const (file, XVECEXP (x, 0, 0));
21732 putc ('-', file);
21733 machopic_output_function_base_name (file);
21734 return true;
21735 #endif
21737 return false;
21740 /* Target hook for assembling integer objects. The PowerPC version has
21741 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21742 is defined. It also needs to handle DI-mode objects on 64-bit
21743 targets. */
21745 static bool
21746 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21748 #ifdef RELOCATABLE_NEEDS_FIXUP
21749 /* Special handling for SI values. */
21750 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21752 static int recurse = 0;
21754 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21755 the .fixup section. Since the TOC section is already relocated, we
21756 don't need to mark it here. We used to skip the text section, but it
21757 should never be valid for relocated addresses to be placed in the text
21758 section. */
21759 if (DEFAULT_ABI == ABI_V4
21760 && (TARGET_RELOCATABLE || flag_pic > 1)
21761 && in_section != toc_section
21762 && !recurse
21763 && !CONST_SCALAR_INT_P (x)
21764 && CONSTANT_P (x))
21766 char buf[256];
21768 recurse = 1;
21769 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21770 fixuplabelno++;
21771 ASM_OUTPUT_LABEL (asm_out_file, buf);
21772 fprintf (asm_out_file, "\t.long\t(");
21773 output_addr_const (asm_out_file, x);
21774 fprintf (asm_out_file, ")@fixup\n");
21775 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21776 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21777 fprintf (asm_out_file, "\t.long\t");
21778 assemble_name (asm_out_file, buf);
21779 fprintf (asm_out_file, "\n\t.previous\n");
21780 recurse = 0;
21781 return true;
21783 /* Remove initial .'s to turn a -mcall-aixdesc function
21784 address into the address of the descriptor, not the function
21785 itself. */
21786 else if (GET_CODE (x) == SYMBOL_REF
21787 && XSTR (x, 0)[0] == '.'
21788 && DEFAULT_ABI == ABI_AIX)
21790 const char *name = XSTR (x, 0);
21791 while (*name == '.')
21792 name++;
21794 fprintf (asm_out_file, "\t.long\t%s\n", name);
21795 return true;
21798 #endif /* RELOCATABLE_NEEDS_FIXUP */
21799 return default_assemble_integer (x, size, aligned_p);
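/* Roughly, for -mrelocatable the fixup branch above emits assembly of
   this shape for a constant address SYM (label number and symbol name
   illustrative):

       .LCP0:
               .long   (sym)@fixup
               .section ".fixup","aw"
               .align  2
               .long   .LCP0
               .previous

   so startup code can find every word that needs runtime relocation.  */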
21802 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21803 /* Emit an assembler directive to set symbol visibility for DECL to
21804 VISIBILITY_TYPE. */
21806 static void
21807 rs6000_assemble_visibility (tree decl, int vis)
21809 if (TARGET_XCOFF)
21810 return;
21812 /* Functions need to have their entry point symbol visibility set as
21813 well as their descriptor symbol visibility. */
21814 if (DEFAULT_ABI == ABI_AIX
21815 && DOT_SYMBOLS
21816 && TREE_CODE (decl) == FUNCTION_DECL)
21818 static const char * const visibility_types[] = {
21819 NULL, "protected", "hidden", "internal"
21822 const char *name, *type;
21824 name = ((* targetm.strip_name_encoding)
21825 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21826 type = visibility_types[vis];
21828 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21829 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21831 else
21832 default_assemble_visibility (decl, vis);
21834 #endif
21836 enum rtx_code
21837 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21839 /* Reversal of FP compares requires care -- an ordered compare
21840 becomes an unordered compare and vice versa. */
21841 if (mode == CCFPmode
21842 && (!flag_finite_math_only
21843 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21844 || code == UNEQ || code == LTGT))
21845 return reverse_condition_maybe_unordered (code);
21846 else
21847 return reverse_condition (code);
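/* A concrete case: with NaNs possible, the inverse of "a >= b" is
   "a UNLT b" (unordered or less than), not "a < b", because both GE
   and LT are false when either operand is a NaN.  So
   reverse_condition_maybe_unordered (GE) yields UNLT, while plain
   reverse_condition (GE) would yield LT, which is only safe under
   flag_finite_math_only.  */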
21850 /* Generate a compare for CODE. Return a brand-new rtx that
21851 represents the result of the compare. */
21853 static rtx
21854 rs6000_generate_compare (rtx cmp, machine_mode mode)
21856 machine_mode comp_mode;
21857 rtx compare_result;
21858 enum rtx_code code = GET_CODE (cmp);
21859 rtx op0 = XEXP (cmp, 0);
21860 rtx op1 = XEXP (cmp, 1);
21862 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21863 comp_mode = CCmode;
21864 else if (FLOAT_MODE_P (mode))
21865 comp_mode = CCFPmode;
21866 else if (code == GTU || code == LTU
21867 || code == GEU || code == LEU)
21868 comp_mode = CCUNSmode;
21869 else if ((code == EQ || code == NE)
21870 && unsigned_reg_p (op0)
21871 && (unsigned_reg_p (op1)
21872 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21873 /* These are unsigned values, perhaps there will be a later
21874 ordering compare that can be shared with this one. */
21875 comp_mode = CCUNSmode;
21876 else
21877 comp_mode = CCmode;
21879 /* If we have an unsigned compare, make sure we don't have a signed value as
21880 an immediate. */
21881 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21882 && INTVAL (op1) < 0)
21884 op0 = copy_rtx_if_shared (op0);
21885 op1 = force_reg (GET_MODE (op0), op1);
21886 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21889 /* First, the compare. */
21890 compare_result = gen_reg_rtx (comp_mode);
21892 /* IEEE 128-bit support in VSX registers when we do not have hardware
21893 support. */
21894 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21896 rtx libfunc = NULL_RTX;
21897 bool check_nan = false;
21898 rtx dest;
21900 switch (code)
21902 case EQ:
21903 case NE:
21904 libfunc = optab_libfunc (eq_optab, mode);
21905 break;
21907 case GT:
21908 case GE:
21909 libfunc = optab_libfunc (ge_optab, mode);
21910 break;
21912 case LT:
21913 case LE:
21914 libfunc = optab_libfunc (le_optab, mode);
21915 break;
21917 case UNORDERED:
21918 case ORDERED:
21919 libfunc = optab_libfunc (unord_optab, mode);
21920 code = (code == UNORDERED) ? NE : EQ;
21921 break;
21923 case UNGE:
21924 case UNGT:
21925 check_nan = true;
21926 libfunc = optab_libfunc (ge_optab, mode);
21927 code = (code == UNGE) ? GE : GT;
21928 break;
21930 case UNLE:
21931 case UNLT:
21932 check_nan = true;
21933 libfunc = optab_libfunc (le_optab, mode);
21934 code = (code == UNLE) ? LE : LT;
21935 break;
21937 case UNEQ:
21938 case LTGT:
21939 check_nan = true;
21940 libfunc = optab_libfunc (eq_optab, mode);
21941 code = (code == UNEQ) ? EQ : NE;
21942 break;
21944 default:
21945 gcc_unreachable ();
21948 gcc_assert (libfunc);
21950 if (!check_nan)
21951 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21952 SImode, op0, mode, op1, mode);
21954 /* The library signals an exception for signalling NaNs, so we need to
21955 handle isgreater, etc. by first checking isordered. */
21956 else
21958 rtx ne_rtx, normal_dest, unord_dest;
21959 rtx unord_func = optab_libfunc (unord_optab, mode);
21960 rtx join_label = gen_label_rtx ();
21961 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21962 rtx unord_cmp = gen_reg_rtx (comp_mode);
21965 /* Test for either value being a NaN. */
21966 gcc_assert (unord_func);
21967 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21968 SImode, op0, mode, op1, mode);
21970 /* Set value (1) if either value is a NaN, and jump to the join
21971 label. */
21972 dest = gen_reg_rtx (SImode);
21973 emit_move_insn (dest, const1_rtx);
21974 emit_insn (gen_rtx_SET (unord_cmp,
21975 gen_rtx_COMPARE (comp_mode, unord_dest,
21976 const0_rtx)));
21978 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21979 emit_jump_insn (gen_rtx_SET (pc_rtx,
21980 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21981 join_ref,
21982 pc_rtx)));
21984 /* Do the normal comparison, knowing that the values are not
21985 NaNs. */
21986 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21987 SImode, op0, mode, op1, mode);
21989 emit_insn (gen_cstoresi4 (dest,
21990 gen_rtx_fmt_ee (code, SImode, normal_dest,
21991 const0_rtx),
21992 normal_dest, const0_rtx));
21994 /* Join NaN and non-NaN paths. Compare dest against 0. */
21995 emit_label (join_label);
21996 code = NE;
21999 emit_insn (gen_rtx_SET (compare_result,
22000 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
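/* The check_nan path above expands to roughly this pseudo-C, shown
   for UNGE on KFmode (a sketch; routine names follow libgcc's
   soft-fp conventions):

       int dest = 1;                      // NaN case: UNGE is true
       if (__unordkf2 (a, b) != 0)        // either operand a NaN?
         goto join;
       dest = (__gekf2 (a, b) >= 0);      // ordered compare
     join:
       result = (dest != 0);

   which is why CODE is forced to NE before the final compare.  */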
22003 else
22005 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
22006 CLOBBERs to match cmptf_internal2 pattern. */
22007 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
22008 && FLOAT128_IBM_P (GET_MODE (op0))
22009 && TARGET_HARD_FLOAT)
22010 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22011 gen_rtvec (10,
22012 gen_rtx_SET (compare_result,
22013 gen_rtx_COMPARE (comp_mode, op0, op1)),
22014 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22015 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22016 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22017 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22018 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22019 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22020 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22021 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22022 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
22023 else if (GET_CODE (op1) == UNSPEC
22024 && XINT (op1, 1) == UNSPEC_SP_TEST)
22026 rtx op1b = XVECEXP (op1, 0, 0);
22027 comp_mode = CCEQmode;
22028 compare_result = gen_reg_rtx (CCEQmode);
22029 if (TARGET_64BIT)
22030 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
22031 else
22032 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
22034 else
22035 emit_insn (gen_rtx_SET (compare_result,
22036 gen_rtx_COMPARE (comp_mode, op0, op1)));
22039 /* Some kinds of FP comparisons need an OR operation;
22040 under flag_finite_math_only we don't bother. */
22041 if (FLOAT_MODE_P (mode)
22042 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22043 && !flag_finite_math_only
22044 && (code == LE || code == GE
22045 || code == UNEQ || code == LTGT
22046 || code == UNGT || code == UNLT))
22048 enum rtx_code or1, or2;
22049 rtx or1_rtx, or2_rtx, compare2_rtx;
22050 rtx or_result = gen_reg_rtx (CCEQmode);
22052 switch (code)
22054 case LE: or1 = LT; or2 = EQ; break;
22055 case GE: or1 = GT; or2 = EQ; break;
22056 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22057 case LTGT: or1 = LT; or2 = GT; break;
22058 case UNGT: or1 = UNORDERED; or2 = GT; break;
22059 case UNLT: or1 = UNORDERED; or2 = LT; break;
22060 default: gcc_unreachable ();
22062 validate_condition_mode (or1, comp_mode);
22063 validate_condition_mode (or2, comp_mode);
22064 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22065 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22066 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22067 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22068 const_true_rtx);
22069 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22071 compare_result = or_result;
22072 code = EQ;
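/* At the machine level the IOR becomes a CR logical instruction.
   For "a <= b" on doubles, the generated sequence is roughly
   (CR fields and label illustrative):

       fcmpu  cr0,f1,f2     # sets lt/gt/eq/un bits of cr0
       cror   6,0,2         # cr1.eq = cr0.lt | cr0.eq
       beq    cr1,.L5       # branch if a <= b

   hence the rewrite of CODE to EQ on the CCEQ result.  */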
22075 validate_condition_mode (code, GET_MODE (compare_result));
22077 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
22081 /* Return the diagnostic message string if the binary operation OP is
22082 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22084 static const char*
22085 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22086 const_tree type1,
22087 const_tree type2)
22089 machine_mode mode1 = TYPE_MODE (type1);
22090 machine_mode mode2 = TYPE_MODE (type2);
22092 /* For complex modes, use the inner type. */
22093 if (COMPLEX_MODE_P (mode1))
22094 mode1 = GET_MODE_INNER (mode1);
22096 if (COMPLEX_MODE_P (mode2))
22097 mode2 = GET_MODE_INNER (mode2);
22099 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22100 double to intermix unless -mfloat128-convert. */
22101 if (mode1 == mode2)
22102 return NULL;
22104 if (!TARGET_FLOAT128_CVT)
22106 if ((mode1 == KFmode && mode2 == IFmode)
22107 || (mode1 == IFmode && mode2 == KFmode))
22108 return N_("__float128 and __ibm128 cannot be used in the same "
22109 "expression");
22111 if (TARGET_IEEEQUAD
22112 && ((mode1 == IFmode && mode2 == TFmode)
22113 || (mode1 == TFmode && mode2 == IFmode)))
22114 return N_("__ibm128 and long double cannot be used in the same "
22115 "expression");
22117 if (!TARGET_IEEEQUAD
22118 && ((mode1 == KFmode && mode2 == TFmode)
22119 || (mode1 == TFmode && mode2 == KFmode)))
22120 return N_("__float128 and long double cannot be used in the same "
22121 "expression");
22124 return NULL;
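/* From the user's side (illustrative translation unit):

       __float128 f128;
       __ibm128   i128;
       long double ld;

       f128 + i128;   // error: __float128 and __ibm128 ... same expression
       f128 + ld;     // error when long double is IBM extended double

   Compiling with -mfloat128-convert instead supplies implicit
   conversions between the two 128-bit formats, so the mix is
   accepted.  */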
22128 /* Expand floating point conversion to/from __float128 and __ibm128. */
22130 void
22131 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22133 machine_mode dest_mode = GET_MODE (dest);
22134 machine_mode src_mode = GET_MODE (src);
22135 convert_optab cvt = unknown_optab;
22136 bool do_move = false;
22137 rtx libfunc = NULL_RTX;
22138 rtx dest2;
22139 typedef rtx (*rtx_2func_t) (rtx, rtx);
22140 rtx_2func_t hw_convert = (rtx_2func_t)0;
22141 size_t kf_or_tf;
22143 struct hw_conv_t {
22144 rtx_2func_t from_df;
22145 rtx_2func_t from_sf;
22146 rtx_2func_t from_si_sign;
22147 rtx_2func_t from_si_uns;
22148 rtx_2func_t from_di_sign;
22149 rtx_2func_t from_di_uns;
22150 rtx_2func_t to_df;
22151 rtx_2func_t to_sf;
22152 rtx_2func_t to_si_sign;
22153 rtx_2func_t to_si_uns;
22154 rtx_2func_t to_di_sign;
22155 rtx_2func_t to_di_uns;
22156 } hw_conversions[2] = {
22157 /* conversions to/from KFmode */
22159 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22160 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22161 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22162 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22163 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22164 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22165 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22166 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22167 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22168 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22169 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22170 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22173 /* conversions to/from TFmode */
22175 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22176 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22177 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22178 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22179 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22180 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22181 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22182 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22183 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22184 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22185 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22186 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22190 if (dest_mode == src_mode)
22191 gcc_unreachable ();
22193 /* Eliminate memory operations. */
22194 if (MEM_P (src))
22195 src = force_reg (src_mode, src);
22197 if (MEM_P (dest))
22199 rtx tmp = gen_reg_rtx (dest_mode);
22200 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22201 rs6000_emit_move (dest, tmp, dest_mode);
22202 return;
22205 /* Convert to IEEE 128-bit floating point. */
22206 if (FLOAT128_IEEE_P (dest_mode))
22208 if (dest_mode == KFmode)
22209 kf_or_tf = 0;
22210 else if (dest_mode == TFmode)
22211 kf_or_tf = 1;
22212 else
22213 gcc_unreachable ();
22215 switch (src_mode)
22217 case E_DFmode:
22218 cvt = sext_optab;
22219 hw_convert = hw_conversions[kf_or_tf].from_df;
22220 break;
22222 case E_SFmode:
22223 cvt = sext_optab;
22224 hw_convert = hw_conversions[kf_or_tf].from_sf;
22225 break;
22227 case E_KFmode:
22228 case E_IFmode:
22229 case E_TFmode:
22230 if (FLOAT128_IBM_P (src_mode))
22231 cvt = sext_optab;
22232 else
22233 do_move = true;
22234 break;
22236 case E_SImode:
22237 if (unsigned_p)
22239 cvt = ufloat_optab;
22240 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22242 else
22244 cvt = sfloat_optab;
22245 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22247 break;
22249 case E_DImode:
22250 if (unsigned_p)
22252 cvt = ufloat_optab;
22253 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22255 else
22257 cvt = sfloat_optab;
22258 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22260 break;
22262 default:
22263 gcc_unreachable ();
22267 /* Convert from IEEE 128-bit floating point. */
22268 else if (FLOAT128_IEEE_P (src_mode))
22270 if (src_mode == KFmode)
22271 kf_or_tf = 0;
22272 else if (src_mode == TFmode)
22273 kf_or_tf = 1;
22274 else
22275 gcc_unreachable ();
22277 switch (dest_mode)
22279 case E_DFmode:
22280 cvt = trunc_optab;
22281 hw_convert = hw_conversions[kf_or_tf].to_df;
22282 break;
22284 case E_SFmode:
22285 cvt = trunc_optab;
22286 hw_convert = hw_conversions[kf_or_tf].to_sf;
22287 break;
22289 case E_KFmode:
22290 case E_IFmode:
22291 case E_TFmode:
22292 if (FLOAT128_IBM_P (dest_mode))
22293 cvt = trunc_optab;
22294 else
22295 do_move = true;
22296 break;
22298 case E_SImode:
22299 if (unsigned_p)
22301 cvt = ufix_optab;
22302 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22304 else
22306 cvt = sfix_optab;
22307 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22309 break;
22311 case E_DImode:
22312 if (unsigned_p)
22314 cvt = ufix_optab;
22315 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22317 else
22319 cvt = sfix_optab;
22320 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22322 break;
22324 default:
22325 gcc_unreachable ();
22329 /* Both IBM format. */
22330 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22331 do_move = true;
22333 else
22334 gcc_unreachable ();
22336 /* Handle conversion between TFmode/KFmode. */
22337 if (do_move)
22338 emit_move_insn (dest, gen_lowpart (dest_mode, src));
22340 /* Handle conversion if we have hardware support. */
22341 else if (TARGET_FLOAT128_HW && hw_convert)
22342 emit_insn ((hw_convert) (dest, src));
22344 /* Call an external function to do the conversion. */
22345 else if (cvt != unknown_optab)
22347 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22348 gcc_assert (libfunc != NULL_RTX);
22350 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22351 src, src_mode);
22353 gcc_assert (dest2 != NULL_RTX);
22354 if (!rtx_equal_p (dest, dest2))
22355 emit_move_insn (dest, dest2);
22358 else
22359 gcc_unreachable ();
22361 return;
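/* Without -mfloat128-hardware the optab lookup above resolves to
   libgcc soft-fp routines, for example (names as produced by the
   libfunc machinery; a sketch):

       double     -> __float128 : __extenddfkf2
       __float128 -> double     : __trunckfdf2
       __float128 -> int        : __fixkfsi
       unsigned   -> __float128 : __floatunsikf

   With hardware support the same conversions go through the *_hw
   patterns from the table above instead.  */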
22365 /* Emit the RTL for an sISEL pattern. */
22367 void
22368 rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
22370 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
22373 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22374 can be used as that dest register. Return the dest register. */
22377 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22379 if (op2 == const0_rtx)
22380 return op1;
22382 if (GET_CODE (scratch) == SCRATCH)
22383 scratch = gen_reg_rtx (mode);
22385 if (logical_operand (op2, mode))
22386 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22387 else
22388 emit_insn (gen_rtx_SET (scratch,
22389 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22391 return scratch;
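/* The identity used here: A == B exactly when (A ^ B) == 0, or, when
   B doesn't fit the xori/xoris immediates, when A + (-B) == 0.  For
   instance "r3 == 0x20" becomes "xori r9,r3,0x20" (registers
   illustrative) and the caller compares r9 against zero.  */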
22394 void
22395 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22397 rtx condition_rtx;
22398 machine_mode op_mode;
22399 enum rtx_code cond_code;
22400 rtx result = operands[0];
22402 condition_rtx = rs6000_generate_compare (operands[1], mode);
22403 cond_code = GET_CODE (condition_rtx);
22405 if (cond_code == NE
22406 || cond_code == GE || cond_code == LE
22407 || cond_code == GEU || cond_code == LEU
22408 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22410 rtx not_result = gen_reg_rtx (CCEQmode);
22411 rtx not_op, rev_cond_rtx;
22412 machine_mode cc_mode;
22414 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22416 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22417 SImode, XEXP (condition_rtx, 0), const0_rtx);
22418 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22419 emit_insn (gen_rtx_SET (not_result, not_op));
22420 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22423 op_mode = GET_MODE (XEXP (operands[1], 0));
22424 if (op_mode == VOIDmode)
22425 op_mode = GET_MODE (XEXP (operands[1], 1));
22427 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22429 PUT_MODE (condition_rtx, DImode);
22430 convert_move (result, condition_rtx, 0);
22432 else
22434 PUT_MODE (condition_rtx, SImode);
22435 emit_insn (gen_rtx_SET (result, condition_rtx));
22439 /* Emit a conditional branch: test the comparison in operands[0] and branch to the label in operands[3]. */
22441 void
22442 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22444 rtx condition_rtx, loc_ref;
22446 condition_rtx = rs6000_generate_compare (operands[0], mode);
22447 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22448 emit_jump_insn (gen_rtx_SET (pc_rtx,
22449 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22450 loc_ref, pc_rtx)));
22453 /* Return the string to output a conditional branch to LABEL, which is
22454 the operand template of the label, or NULL if the branch is really a
22455 conditional return.
22457 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22458 condition code register and its mode specifies what kind of
22459 comparison we made.
22461 REVERSED is nonzero if we should reverse the sense of the comparison.
22463 INSN is the insn. */
22465 char *
22466 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22468 static char string[64];
22469 enum rtx_code code = GET_CODE (op);
22470 rtx cc_reg = XEXP (op, 0);
22471 machine_mode mode = GET_MODE (cc_reg);
22472 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22473 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22474 int really_reversed = reversed ^ need_longbranch;
22475 char *s = string;
22476 const char *ccode;
22477 const char *pred;
22478 rtx note;
22480 validate_condition_mode (code, mode);
22482 /* Work out which way this really branches. We could use
22483 reverse_condition_maybe_unordered here always but this
22484 makes the resulting assembler clearer. */
22485 if (really_reversed)
22487 /* Reversal of FP compares requires care -- an ordered compare
22488 becomes an unordered compare and vice versa. */
22489 if (mode == CCFPmode)
22490 code = reverse_condition_maybe_unordered (code);
22491 else
22492 code = reverse_condition (code);
22495 switch (code)
22497 /* Not all of these are actually distinct opcodes, but
22498 we distinguish them for clarity of the resulting assembler. */
22499 case NE: case LTGT:
22500 ccode = "ne"; break;
22501 case EQ: case UNEQ:
22502 ccode = "eq"; break;
22503 case GE: case GEU:
22504 ccode = "ge"; break;
22505 case GT: case GTU: case UNGT:
22506 ccode = "gt"; break;
22507 case LE: case LEU:
22508 ccode = "le"; break;
22509 case LT: case LTU: case UNLT:
22510 ccode = "lt"; break;
22511 case UNORDERED: ccode = "un"; break;
22512 case ORDERED: ccode = "nu"; break;
22513 case UNGE: ccode = "nl"; break;
22514 case UNLE: ccode = "ng"; break;
22515 default:
22516 gcc_unreachable ();
22519 /* Maybe we have a guess as to how likely the branch is. */
22520 pred = "";
22521 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22522 if (note != NULL_RTX)
22524 /* PROB is the difference from 50%. */
22525 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22526 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22528 /* Only hint for highly probable/improbable branches on newer cpus when
22529 we have real profile data, as static prediction overrides processor
22530 dynamic prediction. For older cpus we may as well always hint, but
22531 assume not taken for branches that are very close to 50% as a
22532 mispredicted taken branch is more expensive than a
22533 mispredicted not-taken branch. */
22534 if (rs6000_always_hint
22535 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22536 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22537 && br_prob_note_reliable_p (note)))
22539 if (abs (prob) > REG_BR_PROB_BASE / 20
22540 && ((prob > 0) ^ need_longbranch))
22541 pred = "+";
22542 else
22543 pred = "-";
22547 if (label == NULL)
22548 s += sprintf (s, "b%slr%s ", ccode, pred);
22549 else
22550 s += sprintf (s, "b%s%s ", ccode, pred);
22552 /* We need to escape any '%' characters in the reg_names string.
22553 Assume they'd only be the first character.... */
22554 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22555 *s++ = '%';
22556 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22558 if (label != NULL)
22560 /* If the branch distance was too far, we may have to use an
22561 unconditional branch to go the distance. */
22562 if (need_longbranch)
22563 s += sprintf (s, ",$+8\n\tb %s", label);
22564 else
22565 s += sprintf (s, ",%s", label);
22568 return string;
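/* Examples of the strings produced here (CR fields and labels
   illustrative):

       beq cr0,.L5          EQ, short branch, no hint
       bne- cr7,.L9         NE, predicted not taken
       beqlr+ cr0           conditional return, predicted taken
       bne cr0,$+8          EQ branch out of range: the test is
            b .L5           inverted around an unconditional branch

   "+" and "-" are the static branch-prediction hint suffixes.  */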
22571 /* Return insn for VSX or Altivec comparisons. */
22573 static rtx
22574 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22576 rtx mask;
22577 machine_mode mode = GET_MODE (op0);
22579 switch (code)
22581 default:
22582 break;
22584 case GE:
22585 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22586 return NULL_RTX;
22587 /* FALLTHRU */
22589 case EQ:
22590 case GT:
22591 case GTU:
22592 case ORDERED:
22593 case UNORDERED:
22594 case UNEQ:
22595 case LTGT:
22596 mask = gen_reg_rtx (mode);
22597 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22598 return mask;
22601 return NULL_RTX;
22604 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22605 DMODE is the expected destination mode. This is a recursive function. */
22607 static rtx
22608 rs6000_emit_vector_compare (enum rtx_code rcode,
22609 rtx op0, rtx op1,
22610 machine_mode dmode)
22612 rtx mask;
22613 bool swap_operands = false;
22614 bool try_again = false;
22616 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22617 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22619 /* See if the comparison works as is. */
22620 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22621 if (mask)
22622 return mask;
22624 switch (rcode)
22626 case LT:
22627 rcode = GT;
22628 swap_operands = true;
22629 try_again = true;
22630 break;
22631 case LTU:
22632 rcode = GTU;
22633 swap_operands = true;
22634 try_again = true;
22635 break;
22636 case NE:
22637 case UNLE:
22638 case UNLT:
22639 case UNGE:
22640 case UNGT:
22641 /* Invert condition and try again.
22642 e.g., A != B becomes ~(A==B). */
22644 enum rtx_code rev_code;
22645 enum insn_code nor_code;
22646 rtx mask2;
22648 rev_code = reverse_condition_maybe_unordered (rcode);
22649 if (rev_code == UNKNOWN)
22650 return NULL_RTX;
22652 nor_code = optab_handler (one_cmpl_optab, dmode);
22653 if (nor_code == CODE_FOR_nothing)
22654 return NULL_RTX;
22656 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22657 if (!mask2)
22658 return NULL_RTX;
22660 mask = gen_reg_rtx (dmode);
22661 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22662 return mask;
22664 break;
22665 case GE:
22666 case GEU:
22667 case LE:
22668 case LEU:
22669 /* Try GT/GTU/LT/LTU OR EQ */
22671 rtx c_rtx, eq_rtx;
22672 enum insn_code ior_code;
22673 enum rtx_code new_code;
22675 switch (rcode)
22677 case GE:
22678 new_code = GT;
22679 break;
22681 case GEU:
22682 new_code = GTU;
22683 break;
22685 case LE:
22686 new_code = LT;
22687 break;
22689 case LEU:
22690 new_code = LTU;
22691 break;
22693 default:
22694 gcc_unreachable ();
22697 ior_code = optab_handler (ior_optab, dmode);
22698 if (ior_code == CODE_FOR_nothing)
22699 return NULL_RTX;
22701 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22702 if (!c_rtx)
22703 return NULL_RTX;
22705 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22706 if (!eq_rtx)
22707 return NULL_RTX;
22709 mask = gen_reg_rtx (dmode);
22710 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22711 return mask;
22713 break;
22714 default:
22715 return NULL_RTX;
22718 if (try_again)
22720 if (swap_operands)
22721 std::swap (op0, op1);
22723 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22724 if (mask)
22725 return mask;
22728 /* You only get two chances. */
22729 return NULL_RTX;
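/* Worked example: V4SI has no "less than or equal" instruction, so
   "a <= b" is decomposed by the recursion above into
   (a < b) | (a == b), and "a < b" in turn becomes "b > a" by
   swapping operands.  The net sequence is roughly (vector registers
   illustrative):

       vcmpgtsw v0,v3,v2    # v0 = (b > a)
       vcmpequw v1,v2,v3    # v1 = (a == b)
       vor      v2,v0,v1    # final mask

   NE and friends go the other route: ~(a == b) via one_cmpl.  */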
22732 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22733 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22734 operands for the relation operation COND. */
22737 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22738 rtx cond, rtx cc_op0, rtx cc_op1)
22740 machine_mode dest_mode = GET_MODE (dest);
22741 machine_mode mask_mode = GET_MODE (cc_op0);
22742 enum rtx_code rcode = GET_CODE (cond);
22743 machine_mode cc_mode = CCmode;
22744 rtx mask;
22745 rtx cond2;
22746 bool invert_move = false;
22748 if (VECTOR_UNIT_NONE_P (dest_mode))
22749 return 0;
22751 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22752 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22754 switch (rcode)
22756 /* Swap operands if we can, and fall back to doing the operation as
22757 specified, and doing a NOR to invert the test. */
22758 case NE:
22759 case UNLE:
22760 case UNLT:
22761 case UNGE:
22762 case UNGT:
22763 /* Invert condition and try again.
22764 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22765 invert_move = true;
22766 rcode = reverse_condition_maybe_unordered (rcode);
22767 if (rcode == UNKNOWN)
22768 return 0;
22769 break;
22771 case GE:
22772 case LE:
22773 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22775 /* Invert condition to avoid compound test. */
22776 invert_move = true;
22777 rcode = reverse_condition (rcode);
22779 break;
22781 case GTU:
22782 case GEU:
22783 case LTU:
22784 case LEU:
22785 /* Mark unsigned tests with CCUNSmode. */
22786 cc_mode = CCUNSmode;
22788 /* Invert condition to avoid compound test if necessary. */
22789 if (rcode == GEU || rcode == LEU)
22791 invert_move = true;
22792 rcode = reverse_condition (rcode);
22794 break;
22796 default:
22797 break;
22800 /* Get the vector mask for the given relational operations. */
22801 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22803 if (!mask)
22804 return 0;
22806 if (invert_move)
22807 std::swap (op_true, op_false);
22809 /* Optimize vec1 == vec2, to know the mask generates -1/0. */
22810 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22811 && (GET_CODE (op_true) == CONST_VECTOR
22812 || GET_CODE (op_false) == CONST_VECTOR))
22814 rtx constant_0 = CONST0_RTX (dest_mode);
22815 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22817 if (op_true == constant_m1 && op_false == constant_0)
22819 emit_move_insn (dest, mask);
22820 return 1;
22823 else if (op_true == constant_0 && op_false == constant_m1)
22825 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22826 return 1;
22829 /* If we can't use the vector comparison directly, perhaps we can use
22830 the mask for the true or false fields, instead of loading up a
22831 constant. */
22832 if (op_true == constant_m1)
22833 op_true = mask;
22835 if (op_false == constant_0)
22836 op_false = mask;
22839 if (!REG_P (op_true) && !SUBREG_P (op_true))
22840 op_true = force_reg (dest_mode, op_true);
22842 if (!REG_P (op_false) && !SUBREG_P (op_false))
22843 op_false = force_reg (dest_mode, op_false);
22845 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22846 CONST0_RTX (dest_mode));
22847 emit_insn (gen_rtx_SET (dest,
22848 gen_rtx_IF_THEN_ELSE (dest_mode,
22849 cond2,
22850 op_true,
22851 op_false)));
22852 return 1;
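/* The constant special cases above mean that for integer vectors
   "x = (a == b) ? -1 : 0" costs just the compare (the mask is the
   answer), and "? 0 : -1" costs one extra complement; every other
   pair of arms goes through the vsel/xxsel-style IF_THEN_ELSE at
   the end.  */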
22855 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22856 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of the
22857 last comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22858 hardware has no such operation. */
22860 static int
22861 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22863 enum rtx_code code = GET_CODE (op);
22864 rtx op0 = XEXP (op, 0);
22865 rtx op1 = XEXP (op, 1);
22866 machine_mode compare_mode = GET_MODE (op0);
22867 machine_mode result_mode = GET_MODE (dest);
22868 bool max_p = false;
22870 if (result_mode != compare_mode)
22871 return 0;
22873 if (code == GE || code == GT)
22874 max_p = true;
22875 else if (code == LE || code == LT)
22876 max_p = false;
22877 else
22878 return 0;
22880 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22883 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22884 max_p = !max_p;
22886 else
22887 return 0;
22889 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22890 return 1;
22893 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22894 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied to
22895 the operands of the last comparison is nonzero/true, FALSE_COND if it is
22896 zero/false. Return 0 if the hardware has no such operation. */
22898 static int
22899 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22901 enum rtx_code code = GET_CODE (op);
22902 rtx op0 = XEXP (op, 0);
22903 rtx op1 = XEXP (op, 1);
22904 machine_mode result_mode = GET_MODE (dest);
22905 rtx compare_rtx;
22906 rtx cmove_rtx;
22907 rtx clobber_rtx;
22909 if (!can_create_pseudo_p ())
22910 return 0;
22912 switch (code)
22914 case EQ:
22915 case GE:
22916 case GT:
22917 break;
22919 case NE:
22920 case LT:
22921 case LE:
22922 code = swap_condition (code);
22923 std::swap (op0, op1);
22924 break;
22926 default:
22927 return 0;
22930 /* Generate: [(parallel [(set (dest)
22931 (if_then_else (op (cmp1) (cmp2))
22932 (true)
22933 (false)))
22934 (clobber (scratch))])]. */
22936 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22937 cmove_rtx = gen_rtx_SET (dest,
22938 gen_rtx_IF_THEN_ELSE (result_mode,
22939 compare_rtx,
22940 true_cond,
22941 false_cond));
22943 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22944 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22945 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22947 return 1;
22950 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22951 operands of the last comparison is nonzero/true, FALSE_COND if it
22952 is zero/false. Return 0 if the hardware has no such operation. */
22955 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22957 enum rtx_code code = GET_CODE (op);
22958 rtx op0 = XEXP (op, 0);
22959 rtx op1 = XEXP (op, 1);
22960 machine_mode compare_mode = GET_MODE (op0);
22961 machine_mode result_mode = GET_MODE (dest);
22962 rtx temp;
22963 bool is_against_zero;
22965 /* These modes should always match. */
22966 if (GET_MODE (op1) != compare_mode
22967 /* In the isel case however, we can use a compare immediate, so
22968 op1 may be a small constant. */
22969 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22970 return 0;
22971 if (GET_MODE (true_cond) != result_mode)
22972 return 0;
22973 if (GET_MODE (false_cond) != result_mode)
22974 return 0;
22976 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22977 if (TARGET_P9_MINMAX
22978 && (compare_mode == SFmode || compare_mode == DFmode)
22979 && (result_mode == SFmode || result_mode == DFmode))
22981 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22982 return 1;
22984 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22985 return 1;
22988 /* Don't allow using floating point comparisons for integer results for
22989 now. */
22990 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22991 return 0;
22993 /* First, work out if the hardware can do this at all, or
22994 if it's too slow.... */
22995 if (!FLOAT_MODE_P (compare_mode))
22997 if (TARGET_ISEL)
22998 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22999 return 0;
23002 is_against_zero = op1 == CONST0_RTX (compare_mode);
23004 /* A floating-point subtract might overflow, underflow, or produce
23005 an inexact result, thus changing the floating-point flags, so it
23006 can't be generated if we care about that. It's safe if one side
23007 of the construct is zero, since then no subtract will be
23008 generated. */
23009 if (SCALAR_FLOAT_MODE_P (compare_mode)
23010 && flag_trapping_math && ! is_against_zero)
23011 return 0;
23013 /* Eliminate half of the comparisons by switching operands, this
23014 makes the remaining code simpler. */
23015 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
23016 || code == LTGT || code == LT || code == UNLE)
23018 code = reverse_condition_maybe_unordered (code);
23019 temp = true_cond;
23020 true_cond = false_cond;
23021 false_cond = temp;
23024 /* UNEQ and LTGT take four instructions for a comparison with zero,
23025 so it'll probably be faster to use a branch here too. */
23026 if (code == UNEQ && HONOR_NANS (compare_mode))
23027 return 0;
23029 /* We're going to try to implement comparisons by performing
23030 a subtract, then comparing against zero. Unfortunately,
23031 Inf - Inf is NaN which is not zero, and so if we don't
23032 know that the operand is finite and the comparison
23033 would treat EQ different to UNORDERED, we can't do it. */
23034 if (HONOR_INFINITIES (compare_mode)
23035 && code != GT && code != UNGE
23036 && (GET_CODE (op1) != CONST_DOUBLE
23037 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23038 /* Constructs of the form (a OP b ? a : b) are safe. */
23039 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23040 || (! rtx_equal_p (op0, true_cond)
23041 && ! rtx_equal_p (op1, true_cond))))
23042 return 0;
23044 /* At this point we know we can use fsel. */
23046 /* Reduce the comparison to a comparison against zero. */
23047 if (! is_against_zero)
23049 temp = gen_reg_rtx (compare_mode);
23050 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23051 op0 = temp;
23052 op1 = CONST0_RTX (compare_mode);
23055 /* If we don't care about NaNs we can reduce some of the comparisons
23056 down to faster ones. */
23057 if (! HONOR_NANS (compare_mode))
23058 switch (code)
23060 case GT:
23061 code = LE;
23062 temp = true_cond;
23063 true_cond = false_cond;
23064 false_cond = temp;
23065 break;
23066 case UNGE:
23067 code = GE;
23068 break;
23069 case UNEQ:
23070 code = EQ;
23071 break;
23072 default:
23073 break;
23076 /* Now, reduce everything down to a GE. */
23077 switch (code)
23079 case GE:
23080 break;
23082 case LE:
23083 temp = gen_reg_rtx (compare_mode);
23084 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23085 op0 = temp;
23086 break;
23088 case ORDERED:
23089 temp = gen_reg_rtx (compare_mode);
23090 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23091 op0 = temp;
23092 break;
23094 case EQ:
23095 temp = gen_reg_rtx (compare_mode);
23096 emit_insn (gen_rtx_SET (temp,
23097 gen_rtx_NEG (compare_mode,
23098 gen_rtx_ABS (compare_mode, op0))));
23099 op0 = temp;
23100 break;
23102 case UNGE:
23103 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23104 temp = gen_reg_rtx (result_mode);
23105 emit_insn (gen_rtx_SET (temp,
23106 gen_rtx_IF_THEN_ELSE (result_mode,
23107 gen_rtx_GE (VOIDmode,
23108 op0, op1),
23109 true_cond, false_cond)));
23110 false_cond = true_cond;
23111 true_cond = temp;
23113 temp = gen_reg_rtx (compare_mode);
23114 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23115 op0 = temp;
23116 break;
23118 case GT:
23119 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23120 temp = gen_reg_rtx (result_mode);
23121 emit_insn (gen_rtx_SET (temp,
23122 gen_rtx_IF_THEN_ELSE (result_mode,
23123 gen_rtx_GE (VOIDmode,
23124 op0, op1),
23125 true_cond, false_cond)));
23126 true_cond = false_cond;
23127 false_cond = temp;
23129 temp = gen_reg_rtx (compare_mode);
23130 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23131 op0 = temp;
23132 break;
23134 default:
23135 gcc_unreachable ();
23138 emit_insn (gen_rtx_SET (dest,
23139 gen_rtx_IF_THEN_ELSE (result_mode,
23140 gen_rtx_GE (VOIDmode,
23141 op0, op1),
23142 true_cond, false_cond)));
23143 return 1;
23146 /* Same as above, but for ints (isel). */
23148 static int
23149 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23151 rtx condition_rtx, cr;
23152 machine_mode mode = GET_MODE (dest);
23153 enum rtx_code cond_code;
23154 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23155 bool signedp;
23157 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23158 return 0;
23160 /* We still have to do the compare, because isel doesn't do a
23161 compare; it just looks at the CRx bits set by a previous compare
23162 instruction. */
23163 condition_rtx = rs6000_generate_compare (op, mode);
23164 cond_code = GET_CODE (condition_rtx);
23165 cr = XEXP (condition_rtx, 0);
23166 signedp = GET_MODE (cr) == CCmode;
23168 isel_func = (mode == SImode
23169 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23170 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23172 switch (cond_code)
23174 case LT: case GT: case LTU: case GTU: case EQ:
23175 /* isel handles these directly. */
23176 break;
23178 default:
23179 /* We need to swap the sense of the comparison. */
23181 std::swap (false_cond, true_cond);
23182 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23184 break;
23187 false_cond = force_reg (mode, false_cond);
23188 if (true_cond != const0_rtx)
23189 true_cond = force_reg (mode, true_cond);
23191 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23193 return 1;
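/* e.g. "r3 = (r4 < r5) ? r6 : r7" for signed SImode becomes
   (registers illustrative):

       cmpw  cr0,r4,r5
       isel  r3,r6,r7,0      # CR bit 0 is cr0.lt: r3 = (r4<r5) ? r6 : r7
*/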
23196 const char *
23197 output_isel (rtx *operands)
23199 enum rtx_code code;
23201 code = GET_CODE (operands[1]);
23203 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
23205 gcc_assert (GET_CODE (operands[2]) == REG
23206 && GET_CODE (operands[3]) == REG);
23207 PUT_CODE (operands[1], reverse_condition (code));
23208 return "isel %0,%3,%2,%j1";
23211 return "isel %0,%2,%3,%j1";
23214 void
23215 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23217 machine_mode mode = GET_MODE (op0);
23218 enum rtx_code c;
23219 rtx target;
23221 /* VSX/altivec have direct min/max insns. */
23222 if ((code == SMAX || code == SMIN)
23223 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23224 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23226 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23227 return;
23230 if (code == SMAX || code == SMIN)
23231 c = GE;
23232 else
23233 c = GEU;
23235 if (code == SMAX || code == UMAX)
23236 target = emit_conditional_move (dest, c, op0, op1, mode,
23237 op0, op1, mode, 0);
23238 else
23239 target = emit_conditional_move (dest, c, op0, op1, mode,
23240 op1, op0, mode, 0);
23241 gcc_assert (target);
23242 if (target != dest)
23243 emit_move_insn (dest, target);
23246 /* Split a signbit operation on 64-bit machines with direct move. Also allow
23247 for the value to come from memory or if it is already loaded into a GPR. */
23249 void
23250 rs6000_split_signbit (rtx dest, rtx src)
23252 machine_mode d_mode = GET_MODE (dest);
23253 machine_mode s_mode = GET_MODE (src);
23254 rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
23255 rtx shift_reg = dest_di;
23257 gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);
23259 if (MEM_P (src))
23261 rtx mem = (WORDS_BIG_ENDIAN
23262 ? adjust_address (src, DImode, 0)
23263 : adjust_address (src, DImode, 8));
23264 emit_insn (gen_rtx_SET (dest_di, mem));
23267 else
23269 unsigned int r = reg_or_subregno (src);
23271 if (INT_REGNO_P (r))
23272 shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));
23274 else
23276 /* Generate the special mfvsrd instruction to get it in a GPR. */
23277 gcc_assert (VSX_REGNO_P (r));
23278 if (s_mode == KFmode)
23279 emit_insn (gen_signbitkf2_dm2 (dest_di, src));
23280 else
23281 emit_insn (gen_signbittf2_dm2 (dest_di, src));
23285 emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
23286 return;
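/* The sign of an IEEE 128-bit float lives in the most-significant
   bit of its high doubleword, so once that doubleword is in a GPR
   the whole operation is one "srdi rD,rS,63".  As plain C (a
   standalone sketch, names hypothetical):

       int signbit_ieee128 (unsigned long long high_dword)
       {
         return (int) (high_dword >> 63);
       }
*/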
23289 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23290 COND is true. Mark the jump as unlikely to be taken. */
23292 static void
23293 emit_unlikely_jump (rtx cond, rtx label)
23295 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23296 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23297 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23300 /* A subroutine of the atomic operation splitters. Emit a load-locked
23301 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23302 the zero_extend operation. */
23304 static void
23305 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23307 rtx (*fn) (rtx, rtx) = NULL;
23309 switch (mode)
23311 case E_QImode:
23312 fn = gen_load_lockedqi;
23313 break;
23314 case E_HImode:
23315 fn = gen_load_lockedhi;
23316 break;
23317 case E_SImode:
23318 if (GET_MODE (mem) == QImode)
23319 fn = gen_load_lockedqi_si;
23320 else if (GET_MODE (mem) == HImode)
23321 fn = gen_load_lockedhi_si;
23322 else
23323 fn = gen_load_lockedsi;
23324 break;
23325 case E_DImode:
23326 fn = gen_load_lockeddi;
23327 break;
23328 case E_TImode:
23329 fn = gen_load_lockedti;
23330 break;
23331 default:
23332 gcc_unreachable ();
23334 emit_insn (fn (reg, mem));
23337 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23338 instruction in MODE. */
23340 static void
23341 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23343 rtx (*fn) (rtx, rtx, rtx) = NULL;
23345 switch (mode)
23347 case E_QImode:
23348 fn = gen_store_conditionalqi;
23349 break;
23350 case E_HImode:
23351 fn = gen_store_conditionalhi;
23352 break;
23353 case E_SImode:
23354 fn = gen_store_conditionalsi;
23355 break;
23356 case E_DImode:
23357 fn = gen_store_conditionaldi;
23358 break;
23359 case E_TImode:
23360 fn = gen_store_conditionalti;
23361 break;
23362 default:
23363 gcc_unreachable ();
23366 /* Emit sync before stwcx. to address PPC405 Erratum. */
23367 if (PPC405_ERRATUM77)
23368 emit_insn (gen_hwsync ());
23370 emit_insn (fn (res, mem, val));
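/* The pattern names used by the two helpers map onto the ISA
   mnemonics:

       QImode   lbarx / stbcx.      HImode   lharx / sthcx.
       SImode   lwarx / stwcx.      DImode   ldarx / stdcx.
       TImode   lqarx / stqcx.

   (The sub-word and quad-word forms need power8-class hardware, per
   the TARGET_SYNC_* checks used elsewhere in this file.)  */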
23373 /* Expand barriers before and after a load_locked/store_cond sequence. */
23375 static rtx
23376 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23378 rtx addr = XEXP (mem, 0);
23380 if (!legitimate_indirect_address_p (addr, reload_completed)
23381 && !legitimate_indexed_address_p (addr, reload_completed))
23383 addr = force_reg (Pmode, addr);
23384 mem = replace_equiv_address_nv (mem, addr);
23387 switch (model)
23389 case MEMMODEL_RELAXED:
23390 case MEMMODEL_CONSUME:
23391 case MEMMODEL_ACQUIRE:
23392 break;
23393 case MEMMODEL_RELEASE:
23394 case MEMMODEL_ACQ_REL:
23395 emit_insn (gen_lwsync ());
23396 break;
23397 case MEMMODEL_SEQ_CST:
23398 emit_insn (gen_hwsync ());
23399 break;
23400 default:
23401 gcc_unreachable ();
23403 return mem;
23406 static void
23407 rs6000_post_atomic_barrier (enum memmodel model)
23409 switch (model)
23411 case MEMMODEL_RELAXED:
23412 case MEMMODEL_CONSUME:
23413 case MEMMODEL_RELEASE:
23414 break;
23415 case MEMMODEL_ACQUIRE:
23416 case MEMMODEL_ACQ_REL:
23417 case MEMMODEL_SEQ_CST:
23418 emit_insn (gen_isync ());
23419 break;
23420 default:
23421 gcc_unreachable ();
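/* Taken together, the pre/post helpers bracket a larx/stcx. loop as
   follows:

       model       before loop    after loop
       relaxed     (nothing)      (nothing)
       consume     (nothing)      (nothing)
       acquire     (nothing)      isync
       release     lwsync         (nothing)
       acq_rel     lwsync         isync
       seq_cst     hwsync         isync
*/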
23425 /* A subroutine of the various atomic expanders. For sub-word operations,
23426 we must adjust things to operate on SImode. Given the original MEM,
23427 return a new aligned memory. Also build and return the quantities by
23428 which to shift and mask. */
23430 static rtx
23431 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23433 rtx addr, align, shift, mask, mem;
23434 HOST_WIDE_INT shift_mask;
23435 machine_mode mode = GET_MODE (orig_mem);
23437 /* For smaller modes, we have to implement this via SImode. */
23438 shift_mask = (mode == QImode ? 0x18 : 0x10);
23440 addr = XEXP (orig_mem, 0);
23441 addr = force_reg (GET_MODE (addr), addr);
23443 /* Aligned memory containing subword. Generate a new memory. We
23444 do not want any of the existing MEM_ATTR data, as we're now
23445 accessing memory outside the original object. */
23446 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23447 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23448 mem = gen_rtx_MEM (SImode, align);
23449 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23450 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23451 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23453 /* Shift amount for subword relative to aligned word. */
23454 shift = gen_reg_rtx (SImode);
23455 addr = gen_lowpart (SImode, addr);
23456 rtx tmp = gen_reg_rtx (SImode);
23457 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23458 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23459 if (BYTES_BIG_ENDIAN)
23460 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23461 shift, 1, OPTAB_LIB_WIDEN);
23462 *pshift = shift;
23464 /* Mask for insertion. */
23465 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23466 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23467 *pmask = mask;
23469 return mem;
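/* A standalone sketch of the arithmetic above for a QImode access
   inside its SImode container (little-endian; names hypothetical):

       void subword_params (unsigned long addr,
                            unsigned *shift, unsigned *mask)
       {
         unsigned long aligned = addr & -4UL;     // word holding the byte
         *shift = (unsigned) (addr << 3) & 0x18;  // bit offset of the byte
         *mask = 0xffU << *shift;                 // insertion mask
         (void) aligned;
       }

   Big-endian additionally XORs the shift with 0x18 (0x10 for
   HImode) so that byte 0 lands in the most-significant position.  */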
23472 /* A subroutine of the various atomic expanders. For sub-word operands,
23473 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23475 static rtx
23476 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23478 rtx x;
23480 x = gen_reg_rtx (SImode);
23481 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23482 gen_rtx_NOT (SImode, mask),
23483 oldval)));
23485 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23487 return x;
23490 /* A subroutine of the various atomic expanders. For sub-word operands,
23491 extract WIDE to NARROW via SHIFT. */
23493 static void
23494 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23496 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23497 wide, 1, OPTAB_LIB_WIDEN);
23498 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23501 /* Expand an atomic compare and swap operation. */
23503 void
23504 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23506 rtx boolval, retval, mem, oldval, newval, cond;
23507 rtx label1, label2, x, mask, shift;
23508 machine_mode mode, orig_mode;
23509 enum memmodel mod_s, mod_f;
23510 bool is_weak;
23512 boolval = operands[0];
23513 retval = operands[1];
23514 mem = operands[2];
23515 oldval = operands[3];
23516 newval = operands[4];
23517 is_weak = (INTVAL (operands[5]) != 0);
23518 mod_s = memmodel_base (INTVAL (operands[6]));
23519 mod_f = memmodel_base (INTVAL (operands[7]));
23520 orig_mode = mode = GET_MODE (mem);
23522 mask = shift = NULL_RTX;
23523 if (mode == QImode || mode == HImode)
23525 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23526 lwarx and shift/mask operations. With power8, we need to do the
23527 comparison in SImode, but the store is still done in QI/HImode. */
23528 oldval = convert_modes (SImode, mode, oldval, 1);
23530 if (!TARGET_SYNC_HI_QI)
23532 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23534 /* Shift and mask OLDVAL into position within the word. */
23535 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23536 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23538 /* Shift and mask NEWVAL into position within the word. */
23539 newval = convert_modes (SImode, mode, newval, 1);
23540 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23541 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23544 /* Prepare to adjust the return value. */
23545 retval = gen_reg_rtx (SImode);
23546 mode = SImode;
23548 else if (reg_overlap_mentioned_p (retval, oldval))
23549 oldval = copy_to_reg (oldval);
23551 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23552 oldval = copy_to_mode_reg (mode, oldval);
23554 if (reg_overlap_mentioned_p (retval, newval))
23555 newval = copy_to_reg (newval);
23557 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23559 label1 = NULL_RTX;
23560 if (!is_weak)
23562 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23563 emit_label (XEXP (label1, 0));
23565 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23567 emit_load_locked (mode, retval, mem);
23569 x = retval;
23570 if (mask)
23571 x = expand_simple_binop (SImode, AND, retval, mask,
23572 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23574 cond = gen_reg_rtx (CCmode);
23575 /* If we have TImode, synthesize a comparison. */
23576 if (mode != TImode)
23577 x = gen_rtx_COMPARE (CCmode, x, oldval);
23578 else
23580 rtx xor1_result = gen_reg_rtx (DImode);
23581 rtx xor2_result = gen_reg_rtx (DImode);
23582 rtx or_result = gen_reg_rtx (DImode);
23583 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23584 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23585 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23586 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23588 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23589 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23590 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23591 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23594 emit_insn (gen_rtx_SET (cond, x));
23596 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23597 emit_unlikely_jump (x, label2);
23599 x = newval;
23600 if (mask)
23601 x = rs6000_mask_atomic_subword (retval, newval, mask);
23603 emit_store_conditional (orig_mode, cond, mem, x);
23605 if (!is_weak)
23607 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23608 emit_unlikely_jump (x, label1);
23611 if (!is_mm_relaxed (mod_f))
23612 emit_label (XEXP (label2, 0));
23614 rs6000_post_atomic_barrier (mod_s);
23616 if (is_mm_relaxed (mod_f))
23617 emit_label (XEXP (label2, 0));
23619 if (shift)
23620 rs6000_finish_atomic_subword (operands[1], retval, shift);
23621 else if (mode != GET_MODE (operands[1]))
23622 convert_move (operands[1], retval, 1);
23624 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23625 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23626 emit_insn (gen_rtx_SET (boolval, x));
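/* A behavioral sketch of the loop emitted above, for the full-word case
   with memory barriers elided. __lwarx and __stwcx are stand-ins for
   the larx/stcx. instruction pair, not real GCC built-ins. */
#if 0
extern unsigned int __lwarx (unsigned int *);
extern int __stwcx (unsigned int *, unsigned int);

static int
model_compare_and_swap (unsigned int *mem, unsigned int *expected,
                        unsigned int desired, int is_weak)
{
  for (;;)
    {
      unsigned int old = __lwarx (mem);     /* load-reserved */
      if (old != *expected)
        {
          *expected = old;                  /* report the observed value */
          return 0;                         /* CR0 is NE: failure */
        }
      if (__stwcx (mem, desired))           /* store-conditional */
        return 1;                           /* CR0 is EQ: success */
      if (is_weak)
        return 0;                           /* weak CAS may fail spuriously */
    }
}
#endif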
23629 /* Expand an atomic exchange operation. */
23631 void
23632 rs6000_expand_atomic_exchange (rtx operands[])
23634 rtx retval, mem, val, cond;
23635 machine_mode mode;
23636 enum memmodel model;
23637 rtx label, x, mask, shift;
23639 retval = operands[0];
23640 mem = operands[1];
23641 val = operands[2];
23642 model = memmodel_base (INTVAL (operands[3]));
23643 mode = GET_MODE (mem);
23645 mask = shift = NULL_RTX;
23646 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23648 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23650 /* Shift and mask VAL into position within the word. */
23651 val = convert_modes (SImode, mode, val, 1);
23652 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23653 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23655 /* Prepare to adjust the return value. */
23656 retval = gen_reg_rtx (SImode);
23657 mode = SImode;
23660 mem = rs6000_pre_atomic_barrier (mem, model);
23662 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23663 emit_label (XEXP (label, 0));
23665 emit_load_locked (mode, retval, mem);
23667 x = val;
23668 if (mask)
23669 x = rs6000_mask_atomic_subword (retval, val, mask);
23671 cond = gen_reg_rtx (CCmode);
23672 emit_store_conditional (mode, cond, mem, x);
23674 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23675 emit_unlikely_jump (x, label);
23677 rs6000_post_atomic_barrier (model);
23679 if (shift)
23680 rs6000_finish_atomic_subword (operands[0], retval, shift);
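/* A behavioral sketch of the exchange loop above (full-word case,
   barriers elided; __lwarx/__stwcx are stand-ins as before). */
#if 0
extern unsigned int __lwarx (unsigned int *);
extern int __stwcx (unsigned int *, unsigned int);

static unsigned int
model_atomic_exchange (unsigned int *mem, unsigned int val)
{
  unsigned int old;
  do
    old = __lwarx (mem);          /* load-reserved */
  while (!__stwcx (mem, val));    /* retry until the store succeeds */
  return old;                     /* the value seen before the swap */
}
#endif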
23683 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23684 to perform. MEM is the memory on which to operate. VAL is the second
23685 operand of the binary operator. BEFORE and AFTER are optional locations to
23686 return the value of MEM either before or after the operation. MODEL_RTX
23687 is a CONST_INT containing the memory model to use. */
23689 void
23690 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23691 rtx orig_before, rtx orig_after, rtx model_rtx)
23693 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23694 machine_mode mode = GET_MODE (mem);
23695 machine_mode store_mode = mode;
23696 rtx label, x, cond, mask, shift;
23697 rtx before = orig_before, after = orig_after;
23699 mask = shift = NULL_RTX;
23700 /* On power8, we want to use SImode for the operation. On earlier systems,
23701 do the operation on a full word and shift/mask to get the proper byte or
23702 halfword. */
23703 if (mode == QImode || mode == HImode)
23705 if (TARGET_SYNC_HI_QI)
23707 val = convert_modes (SImode, mode, val, 1);
23709 /* Prepare to adjust the return value. */
23710 before = gen_reg_rtx (SImode);
23711 if (after)
23712 after = gen_reg_rtx (SImode);
23713 mode = SImode;
23715 else
23717 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23719 /* Shift and mask VAL into position within the word. */
23720 val = convert_modes (SImode, mode, val, 1);
23721 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23722 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23724 switch (code)
23726 case IOR:
23727 case XOR:
23728 /* We've already zero-extended VAL. That is sufficient to
23729 make certain that it does not affect other bits. */
23730 mask = NULL;
23731 break;
23733 case AND:
23734 /* If we make certain that all of the other bits in VAL are
23735 set, that will be sufficient to not affect other bits. */
23736 x = gen_rtx_NOT (SImode, mask);
23737 x = gen_rtx_IOR (SImode, x, val);
23738 emit_insn (gen_rtx_SET (val, x));
23739 mask = NULL;
23740 break;
23742 case NOT:
23743 case PLUS:
23744 case MINUS:
23745 /* These will all affect bits outside the field and need
23746 adjustment via MASK within the loop. */
23747 break;
23749 default:
23750 gcc_unreachable ();
23753 /* Prepare to adjust the return value. */
23754 before = gen_reg_rtx (SImode);
23755 if (after)
23756 after = gen_reg_rtx (SImode);
23757 store_mode = mode = SImode;
23761 mem = rs6000_pre_atomic_barrier (mem, model);
23763 label = gen_label_rtx ();
23764 emit_label (label);
23765 label = gen_rtx_LABEL_REF (VOIDmode, label);
23767 if (before == NULL_RTX)
23768 before = gen_reg_rtx (mode);
23770 emit_load_locked (mode, before, mem);
23772 if (code == NOT)
23774 x = expand_simple_binop (mode, AND, before, val,
23775 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23776 after = expand_simple_unop (mode, NOT, x, after, 1);
23778 else
23780 after = expand_simple_binop (mode, code, before, val,
23781 after, 1, OPTAB_LIB_WIDEN);
23784 x = after;
23785 if (mask)
23787 x = expand_simple_binop (SImode, AND, after, mask,
23788 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23789 x = rs6000_mask_atomic_subword (before, x, mask);
23791 else if (store_mode != mode)
23792 x = convert_modes (store_mode, mode, x, 1);
23794 cond = gen_reg_rtx (CCmode);
23795 emit_store_conditional (store_mode, cond, mem, x);
23797 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23798 emit_unlikely_jump (x, label);
23800 rs6000_post_atomic_barrier (model);
23802 if (shift)
23804 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23805 then do the calculations in an SImode register. */
23806 if (orig_before)
23807 rs6000_finish_atomic_subword (orig_before, before, shift);
23808 if (orig_after)
23809 rs6000_finish_atomic_subword (orig_after, after, shift);
23811 else if (store_mode != mode)
23813 /* QImode/HImode on machines with lbarx/lharx where we do the native
23814 operation and then do the calculations in an SImode register. */
23815 if (orig_before)
23816 convert_move (orig_before, before, 1);
23817 if (orig_after)
23818 convert_move (orig_after, after, 1);
23820 else if (orig_after && after != orig_after)
23821 emit_move_insn (orig_after, after);
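/* A behavioral sketch of the read-modify-write loop above, shown for
   PLUS on a full word (barriers elided; __lwarx/__stwcx are stand-ins
   as before). BEFORE/AFTER correspond to orig_before/orig_after. */
#if 0
extern unsigned int __lwarx (unsigned int *);
extern int __stwcx (unsigned int *, unsigned int);

static void
model_atomic_fetch_add (unsigned int *mem, unsigned int val,
                        unsigned int *before, unsigned int *after)
{
  unsigned int old, new_val;
  do
    {
      old = __lwarx (mem);        /* load-reserved */
      new_val = old + val;        /* the CODE operation */
    }
  while (!__stwcx (mem, new_val));
  if (before)
    *before = old;
  if (after)
    *after = new_val;
}
#endif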
23824 /* Emit instructions to move SRC to DST. Called by splitters for
23825 multi-register moves. It will emit at most one instruction for
23826 each register that is accessed; that is, it won't emit li/lis pairs
23827 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23828 register. */
23830 void
23831 rs6000_split_multireg_move (rtx dst, rtx src)
23833 /* The register number of the first register being moved. */
23834 int reg;
23835 /* The mode that is to be moved. */
23836 machine_mode mode;
23837 /* The mode that the move is being done in, and its size. */
23838 machine_mode reg_mode;
23839 int reg_mode_size;
23840 /* The number of registers that will be moved. */
23841 int nregs;
23843 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23844 mode = GET_MODE (dst);
23845 nregs = hard_regno_nregs (reg, mode);
23846 if (FP_REGNO_P (reg))
23847 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23848 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
23849 else if (ALTIVEC_REGNO_P (reg))
23850 reg_mode = V16QImode;
23851 else
23852 reg_mode = word_mode;
23853 reg_mode_size = GET_MODE_SIZE (reg_mode);
23855 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23857 /* TDmode residing in FP registers is special, since the ISA requires that
23858 the lower-numbered word of a register pair is always the most significant
23859 word, even in little-endian mode. This does not match the usual subreg
23860 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23861 the appropriate constituent registers "by hand" in little-endian mode.
23863 Note we do not need to check for destructive overlap here since TDmode
23864 can only reside in even/odd register pairs. */
23865 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23867 rtx p_src, p_dst;
23868 int i;
23870 for (i = 0; i < nregs; i++)
23872 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23873 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23874 else
23875 p_src = simplify_gen_subreg (reg_mode, src, mode,
23876 i * reg_mode_size);
23878 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23879 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23880 else
23881 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23882 i * reg_mode_size);
23884 emit_insn (gen_rtx_SET (p_dst, p_src));
23887 return;
23890 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23892 /* Move register range backwards, if we might have destructive
23893 overlap. */
23894 int i;
23895 for (i = nregs - 1; i >= 0; i--)
23896 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23897 i * reg_mode_size),
23898 simplify_gen_subreg (reg_mode, src, mode,
23899 i * reg_mode_size)));
23901 else
23903 int i;
23904 int j = -1;
23905 bool used_update = false;
23906 rtx restore_basereg = NULL_RTX;
23908 if (MEM_P (src) && INT_REGNO_P (reg))
23910 rtx breg;
23912 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23913 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23915 rtx delta_rtx;
23916 breg = XEXP (XEXP (src, 0), 0);
23917 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23918 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23919 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23920 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23921 src = replace_equiv_address (src, breg);
23923 else if (! rs6000_offsettable_memref_p (src, reg_mode))
23925 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23927 rtx basereg = XEXP (XEXP (src, 0), 0);
23928 if (TARGET_UPDATE)
23930 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23931 emit_insn (gen_rtx_SET (ndst,
23932 gen_rtx_MEM (reg_mode,
23933 XEXP (src, 0))));
23934 used_update = true;
23936 else
23937 emit_insn (gen_rtx_SET (basereg,
23938 XEXP (XEXP (src, 0), 1)));
23939 src = replace_equiv_address (src, basereg);
23941 else
23943 rtx basereg = gen_rtx_REG (Pmode, reg);
23944 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23945 src = replace_equiv_address (src, basereg);
23949 breg = XEXP (src, 0);
23950 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23951 breg = XEXP (breg, 0);
23953 /* If the base register we are using to address memory is
23954 also a destination reg, then change that register last. */
23955 if (REG_P (breg)
23956 && REGNO (breg) >= REGNO (dst)
23957 && REGNO (breg) < REGNO (dst) + nregs)
23958 j = REGNO (breg) - REGNO (dst);
23960 else if (MEM_P (dst) && INT_REGNO_P (reg))
23962 rtx breg;
23964 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23965 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23967 rtx delta_rtx;
23968 breg = XEXP (XEXP (dst, 0), 0);
23969 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23970 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23971 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23973 /* We have to update the breg before doing the store.
23974 Use store with update, if available. */
23976 if (TARGET_UPDATE)
23978 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23979 emit_insn (TARGET_32BIT
23980 ? (TARGET_POWERPC64
23981 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23982 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23983 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23984 used_update = true;
23986 else
23987 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23988 dst = replace_equiv_address (dst, breg);
23990 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
23991 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23993 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23995 rtx basereg = XEXP (XEXP (dst, 0), 0);
23996 if (TARGET_UPDATE)
23998 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23999 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
24000 XEXP (dst, 0)),
24001 nsrc));
24002 used_update = true;
24004 else
24005 emit_insn (gen_rtx_SET (basereg,
24006 XEXP (XEXP (dst, 0), 1)));
24007 dst = replace_equiv_address (dst, basereg);
24009 else
24011 rtx basereg = XEXP (XEXP (dst, 0), 0);
24012 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
24013 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
24014 && REG_P (basereg)
24015 && REG_P (offsetreg)
24016 && REGNO (basereg) != REGNO (offsetreg));
24017 if (REGNO (basereg) == 0)
24019 rtx tmp = offsetreg;
24020 offsetreg = basereg;
24021 basereg = tmp;
24023 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
24024 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
24025 dst = replace_equiv_address (dst, basereg);
24028 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
24029 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
24032 for (i = 0; i < nregs; i++)
24034 /* Calculate index to next subword. */
24035 ++j;
24036 if (j == nregs)
24037 j = 0;
24039 /* If compiler already emitted move of first word by
24040 store with update, no need to do anything. */
24041 if (j == 0 && used_update)
24042 continue;
24044 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24045 j * reg_mode_size),
24046 simplify_gen_subreg (reg_mode, src, mode,
24047 j * reg_mode_size)));
24049 if (restore_basereg != NULL_RTX)
24050 emit_insn (restore_basereg);
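/* The overlap handling above mirrors what memmove does for bytes: when
   the source range starts below the destination range, copy from the
   highest word down. A minimal model, with the register file stood in
   for by an array (illustrative only): */
#if 0
static void
model_multireg_move (unsigned long *dst, const unsigned long *src, int nregs)
{
  if (src < dst)                        /* possible destructive overlap */
    for (int i = nregs - 1; i >= 0; i--)
      dst[i] = src[i];
  else
    for (int i = 0; i < nregs; i++)
      dst[i] = src[i];
}
#endif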
24055 /* This page contains routines that are used to determine what the
24056 function prologue and epilogue code will do and write them out. */
24058 /* Determine whether the REG is really used. */
24060 static bool
24061 save_reg_p (int reg)
24063 /* We need to mark the PIC offset register live for the same conditions
24064 as it is set up, or otherwise it won't be saved before we clobber it. */
24066 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24068 /* When calling eh_return, we must return true for all the cases
24069 where conditional_register_usage marks the PIC offset reg
24070 call used. */
24071 if (TARGET_TOC && TARGET_MINIMAL_TOC
24072 && (crtl->calls_eh_return
24073 || df_regs_ever_live_p (reg)
24074 || !constant_pool_empty_p ()))
24075 return true;
24077 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
24078 && flag_pic)
24079 return true;
24082 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
24085 /* Return the first fixed-point register that is required to be
24086 saved. 32 if none. */
24089 first_reg_to_save (void)
24091 int first_reg;
24093 /* Find lowest numbered live register. */
24094 for (first_reg = 13; first_reg <= 31; first_reg++)
24095 if (save_reg_p (first_reg))
24096 break;
24098 #if TARGET_MACHO
24099 if (flag_pic
24100 && crtl->uses_pic_offset_table
24101 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
24102 return RS6000_PIC_OFFSET_TABLE_REGNUM;
24103 #endif
24105 return first_reg;
24108 /* Similar, for FP regs. */
24111 first_fp_reg_to_save (void)
24113 int first_reg;
24115 /* Find lowest numbered live register. */
24116 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24117 if (save_reg_p (first_reg))
24118 break;
24120 return first_reg;
24123 /* Similar, for AltiVec regs. */
24125 static int
24126 first_altivec_reg_to_save (void)
24128 int i;
24130 /* Stack frame remains as is unless we are in AltiVec ABI. */
24131 if (! TARGET_ALTIVEC_ABI)
24132 return LAST_ALTIVEC_REGNO + 1;
24134 /* On Darwin, the unwind routines are compiled without
24135 TARGET_ALTIVEC, and use save_world to save/restore the
24136 altivec registers when necessary. */
24137 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24138 && ! TARGET_ALTIVEC)
24139 return FIRST_ALTIVEC_REGNO + 20;
24141 /* Find lowest numbered live register. */
24142 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24143 if (save_reg_p (i))
24144 break;
24146 return i;
24149 /* Return a 32-bit mask of the AltiVec registers we need to set in
24150 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
24151 the 32-bit word is 0. */
24153 static unsigned int
24154 compute_vrsave_mask (void)
24156 unsigned int i, mask = 0;
24158 /* On Darwin, the unwind routines are compiled without
24159 TARGET_ALTIVEC, and use save_world to save/restore the
24160 call-saved altivec registers when necessary. */
24161 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24162 && ! TARGET_ALTIVEC)
24163 mask |= 0xFFF;
24165 /* First, find out if we use _any_ altivec registers. */
24166 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24167 if (df_regs_ever_live_p (i))
24168 mask |= ALTIVEC_REG_BIT (i);
24170 if (mask == 0)
24171 return mask;
24173 /* Next, remove the argument registers from the set. These must
24174 be in the VRSAVE mask set by the caller, so we don't need to add
24175 them in again. More importantly, the mask we compute here is
24176 used to generate CLOBBERs in the set_vrsave insn, and we do not
24177 wish the argument registers to die. */
24178 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24179 mask &= ~ALTIVEC_REG_BIT (i);
24181 /* Similarly, remove the return value from the set. */
24183 bool yes = false;
24184 diddle_return_value (is_altivec_return_reg, &yes);
24185 if (yes)
24186 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24189 return mask;
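/* VRSAVE uses PowerPC (big-endian) bit numbering: bit 0 is the MSB of
   the 32-bit word and corresponds to V0. A model of the bit selection
   used above (ALTIVEC_REG_BIT has this shape; the sketch is
   illustrative only): */
#if 0
static unsigned int
model_altivec_reg_bit (int n)   /* 0 <= n <= 31, for Vn */
{
  return 0x80000000u >> n;      /* V0 -> MSB ... V31 -> LSB */
}
#endif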
24192 /* For a very restricted set of circumstances, we can cut down the
24193 size of prologues/epilogues by calling our own save/restore-the-world
24194 routines. */
24196 static void
24197 compute_save_world_info (rs6000_stack_t *info)
24199 info->world_save_p = 1;
24200 info->world_save_p
24201 = (WORLD_SAVE_P (info)
24202 && DEFAULT_ABI == ABI_DARWIN
24203 && !cfun->has_nonlocal_label
24204 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24205 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24206 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24207 && info->cr_save_p);
24209 /* This will not work in conjunction with sibcalls. Make sure there
24210 are none. (This check is expensive, but seldom executed.) */
24211 if (WORLD_SAVE_P (info))
24213 rtx_insn *insn;
24214 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24215 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24217 info->world_save_p = 0;
24218 break;
24222 if (WORLD_SAVE_P (info))
24224 /* Even if we're not touching VRsave, make sure there's room on the
24225 stack for it, if it looks like we're calling SAVE_WORLD, which
24226 will attempt to save it. */
24227 info->vrsave_size = 4;
24229 /* If we are going to save the world, we need to save the link register too. */
24230 info->lr_save_p = 1;
24232 /* "Save" the VRsave register too if we're saving the world. */
24233 if (info->vrsave_mask == 0)
24234 info->vrsave_mask = compute_vrsave_mask ();
24236 /* Because the Darwin register save/restore routines only handle
24237 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24238 check. */
24239 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24240 && (info->first_altivec_reg_save
24241 >= FIRST_SAVED_ALTIVEC_REGNO));
24244 return;
24248 static void
24249 is_altivec_return_reg (rtx reg, void *xyes)
24251 bool *yes = (bool *) xyes;
24252 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24253 *yes = true;
24257 /* Return whether REG is a global user reg or has been specified by
24258 -ffixed-REG. We should not restore these, and so cannot use
24259 lmw or out-of-line restore functions if there are any. We also
24260 can't save them (well, emit frame notes for them), because frame
24261 unwinding during exception handling will restore saved registers. */
24263 static bool
24264 fixed_reg_p (int reg)
24266 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24267 backend sets it, overriding anything the user might have given. */
24268 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24269 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24270 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24271 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24272 return false;
24274 return fixed_regs[reg];
24277 /* Determine the strategy for saving/restoring registers. */
24279 enum {
24280 SAVE_MULTIPLE = 0x1,
24281 SAVE_INLINE_GPRS = 0x2,
24282 SAVE_INLINE_FPRS = 0x4,
24283 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24284 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24285 SAVE_INLINE_VRS = 0x20,
24286 REST_MULTIPLE = 0x100,
24287 REST_INLINE_GPRS = 0x200,
24288 REST_INLINE_FPRS = 0x400,
24289 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24290 REST_INLINE_VRS = 0x1000
24293 static int
24294 rs6000_savres_strategy (rs6000_stack_t *info,
24295 bool using_static_chain_p)
24297 int strategy = 0;
24299 /* Select between in-line and out-of-line save and restore of regs.
24300 First, all the obvious cases where we don't use out-of-line. */
24301 if (crtl->calls_eh_return
24302 || cfun->machine->ra_need_lr)
24303 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24304 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24305 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24307 if (info->first_gp_reg_save == 32)
24308 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24310 if (info->first_fp_reg_save == 64
24311 /* The out-of-line FP routines use double-precision stores;
24312 we can't use those routines if we don't have such stores. */
24313 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
24314 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24316 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24317 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24319 /* Define cutoff for using out-of-line functions to save registers. */
24320 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24322 if (!optimize_size)
24324 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24325 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24326 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24328 else
24330 /* Prefer out-of-line restore if it will exit. */
24331 if (info->first_fp_reg_save > 61)
24332 strategy |= SAVE_INLINE_FPRS;
24333 if (info->first_gp_reg_save > 29)
24335 if (info->first_fp_reg_save == 64)
24336 strategy |= SAVE_INLINE_GPRS;
24337 else
24338 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24340 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24341 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24344 else if (DEFAULT_ABI == ABI_DARWIN)
24346 if (info->first_fp_reg_save > 60)
24347 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24348 if (info->first_gp_reg_save > 29)
24349 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24350 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24352 else
24354 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24355 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24356 || info->first_fp_reg_save > 61)
24357 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24358 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24359 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24362 /* Don't bother to try to save things out-of-line if r11 is occupied
24363 by the static chain. It would require too much fiddling and the
24364 static chain is rarely used anyway. FPRs are saved w.r.t the stack
24365 pointer on Darwin, and AIX uses r1 or r12. */
24366 if (using_static_chain_p
24367 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24368 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24369 | SAVE_INLINE_GPRS
24370 | SAVE_INLINE_VRS);
24372 /* Don't ever restore fixed regs. That means we can't use the
24373 out-of-line register restore functions if a fixed reg is in the
24374 range of regs restored. */
24375 if (!(strategy & REST_INLINE_FPRS))
24376 for (int i = info->first_fp_reg_save; i < 64; i++)
24377 if (fixed_regs[i])
24379 strategy |= REST_INLINE_FPRS;
24380 break;
24383 /* We can only use the out-of-line routines to restore fprs if we've
24384 saved all the registers from first_fp_reg_save in the prologue.
24385 Otherwise, we risk loading garbage. Of course, if we have saved
24386 out-of-line then we know we haven't skipped any fprs. */
24387 if ((strategy & SAVE_INLINE_FPRS)
24388 && !(strategy & REST_INLINE_FPRS))
24389 for (int i = info->first_fp_reg_save; i < 64; i++)
24390 if (!save_reg_p (i))
24392 strategy |= REST_INLINE_FPRS;
24393 break;
24396 /* Similarly, for altivec regs. */
24397 if (!(strategy & REST_INLINE_VRS))
24398 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24399 if (fixed_regs[i])
24401 strategy |= REST_INLINE_VRS;
24402 break;
24405 if ((strategy & SAVE_INLINE_VRS)
24406 && !(strategy & REST_INLINE_VRS))
24407 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24408 if (!save_reg_p (i))
24410 strategy |= REST_INLINE_VRS;
24411 break;
24414 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24415 saved is an out-of-line save or restore. Set up the value for
24416 the next test (excluding out-of-line gprs). */
24417 bool lr_save_p = (info->lr_save_p
24418 || !(strategy & SAVE_INLINE_FPRS)
24419 || !(strategy & SAVE_INLINE_VRS)
24420 || !(strategy & REST_INLINE_FPRS)
24421 || !(strategy & REST_INLINE_VRS));
24423 if (TARGET_MULTIPLE
24424 && !TARGET_POWERPC64
24425 && info->first_gp_reg_save < 31
24426 && !(flag_shrink_wrap
24427 && flag_shrink_wrap_separate
24428 && optimize_function_for_speed_p (cfun)))
24430 int count = 0;
24431 for (int i = info->first_gp_reg_save; i < 32; i++)
24432 if (save_reg_p (i))
24433 count++;
24435 if (count <= 1)
24436 /* Don't use store multiple if only one reg needs to be
24437 saved. This can occur for example when the ABI_V4 pic reg
24438 (r30) needs to be saved to make calls, but r31 is not
24439 used. */
24440 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24441 else
24443 /* Prefer store multiple for saves over out-of-line
24444 routines, since the store-multiple instruction will
24445 always be smaller. */
24446 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24448 /* The situation is more complicated with load multiple.
24449 We'd prefer to use the out-of-line routines for restores,
24450 since the "exit" out-of-line routines can handle the
24451 restore of LR and the frame teardown. However it doesn't
24452 make sense to use the out-of-line routine if that is the
24453 only reason we'd need to save LR, and we can't use the
24454 "exit" out-of-line gpr restore if we have saved some
24455 fprs; in those cases it is advantageous to use load
24456 multiple when available. */
24457 if (info->first_fp_reg_save != 64 || !lr_save_p)
24458 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24462 /* Using the "exit" out-of-line routine does not improve code size
24463 if using it would require lr to be saved and if only saving one
24464 or two gprs. */
24465 else if (!lr_save_p && info->first_gp_reg_save > 29)
24466 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24468 /* Don't ever restore fixed regs. */
24469 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24470 for (int i = info->first_gp_reg_save; i < 32; i++)
24471 if (fixed_reg_p (i))
24473 strategy |= REST_INLINE_GPRS;
24474 strategy &= ~REST_MULTIPLE;
24475 break;
24478 /* We can only use load multiple or the out-of-line routines to
24479 restore gprs if we've saved all the registers from
24480 first_gp_reg_save. Otherwise, we risk loading garbage.
24481 Of course, if we have saved out-of-line or used stmw then we know
24482 we haven't skipped any gprs. */
24483 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24484 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24485 for (int i = info->first_gp_reg_save; i < 32; i++)
24486 if (!save_reg_p (i))
24488 strategy |= REST_INLINE_GPRS;
24489 strategy &= ~REST_MULTIPLE;
24490 break;
24493 if (TARGET_ELF && TARGET_64BIT)
24495 if (!(strategy & SAVE_INLINE_FPRS))
24496 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24497 else if (!(strategy & SAVE_INLINE_GPRS)
24498 && info->first_fp_reg_save == 64)
24499 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24501 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24502 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24504 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24505 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24507 return strategy;
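/* The strategy word is a plain bitmask over the enum above. A
   hypothetical consumer (illustrative only) would test it like so: */
#if 0
static void
model_use_strategy (int strategy)
{
  if (strategy & SAVE_MULTIPLE)
    ;   /* one stmw covers the whole GPR save */
  else if (!(strategy & SAVE_INLINE_GPRS))
    ;   /* call an out-of-line GPR save routine */
  else
    ;   /* emit individual stw/std stores */
}
#endif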
24510 /* Calculate the stack information for the current function. This is
24511 complicated by having two separate calling sequences, the AIX calling
24512 sequence and the V.4 calling sequence.
24514 AIX (and Darwin/Mac OS X) stack frames look like:
24515 32-bit 64-bit
24516 SP----> +---------------------------------------+
24517 | back chain to caller | 0 0
24518 +---------------------------------------+
24519 | saved CR | 4 8 (8-11)
24520 +---------------------------------------+
24521 | saved LR | 8 16
24522 +---------------------------------------+
24523 | reserved for compilers | 12 24
24524 +---------------------------------------+
24525 | reserved for binders | 16 32
24526 +---------------------------------------+
24527 | saved TOC pointer | 20 40
24528 +---------------------------------------+
24529 | Parameter save area (+padding*) (P) | 24 48
24530 +---------------------------------------+
24531 | Alloca space (A) | 24+P etc.
24532 +---------------------------------------+
24533 | Local variable space (L) | 24+P+A
24534 +---------------------------------------+
24535 | Float/int conversion temporary (X) | 24+P+A+L
24536 +---------------------------------------+
24537 | Save area for AltiVec registers (W) | 24+P+A+L+X
24538 +---------------------------------------+
24539 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24540 +---------------------------------------+
24541 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24542 +---------------------------------------+
24543 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24544 +---------------------------------------+
24545 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24546 +---------------------------------------+
24547 old SP->| back chain to caller's caller |
24548 +---------------------------------------+
24550 * If the alloca area is present, the parameter save area is
24551 padded so that the former starts 16-byte aligned.
24553 The required alignment for AIX configurations is two words (i.e., 8
24554 or 16 bytes).
24556 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24558 SP----> +---------------------------------------+
24559 | Back chain to caller | 0
24560 +---------------------------------------+
24561 | Save area for CR | 8
24562 +---------------------------------------+
24563 | Saved LR | 16
24564 +---------------------------------------+
24565 | Saved TOC pointer | 24
24566 +---------------------------------------+
24567 | Parameter save area (+padding*) (P) | 32
24568 +---------------------------------------+
24569 | Alloca space (A) | 32+P
24570 +---------------------------------------+
24571 | Local variable space (L) | 32+P+A
24572 +---------------------------------------+
24573 | Save area for AltiVec registers (W) | 32+P+A+L
24574 +---------------------------------------+
24575 | AltiVec alignment padding (Y) | 32+P+A+L+W
24576 +---------------------------------------+
24577 | Save area for GP registers (G) | 32+P+A+L+W+Y
24578 +---------------------------------------+
24579 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24580 +---------------------------------------+
24581 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24582 +---------------------------------------+
24584 * If the alloca area is present, the parameter save area is
24585 padded so that the former starts 16-byte aligned.
24587 V.4 stack frames look like:
24589 SP----> +---------------------------------------+
24590 | back chain to caller | 0
24591 +---------------------------------------+
24592 | caller's saved LR | 4
24593 +---------------------------------------+
24594 | Parameter save area (+padding*) (P) | 8
24595 +---------------------------------------+
24596 | Alloca space (A) | 8+P
24597 +---------------------------------------+
24598 | Varargs save area (V) | 8+P+A
24599 +---------------------------------------+
24600 | Local variable space (L) | 8+P+A+V
24601 +---------------------------------------+
24602 | Float/int conversion temporary (X) | 8+P+A+V+L
24603 +---------------------------------------+
24604 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24605 +---------------------------------------+
24606 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24607 +---------------------------------------+
24608 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24609 +---------------------------------------+
24610 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24611 +---------------------------------------+
24612 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24613 +---------------------------------------+
24614 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24615 +---------------------------------------+
24616 old SP->| back chain to caller's caller |
24617 +---------------------------------------+
24619 * If the alloca area is present and the required alignment is
24620 16 bytes, the parameter save area is padded so that the
24621 alloca area starts 16-byte aligned.
24623 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24624 given. (But note below and in sysv4.h that we require only 8 and
24625 may round up the size of our stack frame anyway. The historical
24626 reason is early versions of powerpc-linux which didn't properly
24627 align the stack at program startup. A happy side-effect is that
24628 -mno-eabi libraries can be used with -meabi programs.)
24630 The EABI configuration defaults to the V.4 layout. However,
24631 the stack alignment requirements may differ. If -mno-eabi is not
24632 given, the required stack alignment is 8 bytes; if -mno-eabi is
24633 given, the required alignment is 16 bytes. (But see V.4 comment
24634 above.) */
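/* A worked example of the 64-bit AIX layout above, assuming a 64-byte
   parameter save area (P=64), no alloca (A=0), and 32 bytes of locals
   (L=32): the parameter area starts at SP+48, the local variable space
   at SP+48+64 = SP+112, and the float/int conversion temporary at
   SP+112+32 = SP+144; the remaining save areas then follow in the
   order shown, up to the old SP. */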
24636 #ifndef ABI_STACK_BOUNDARY
24637 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24638 #endif
24640 static rs6000_stack_t *
24641 rs6000_stack_info (void)
24643 /* We should never be called for thunks, we are not set up for that. */
24644 gcc_assert (!cfun->is_thunk);
24646 rs6000_stack_t *info = &stack_info;
24647 int reg_size = TARGET_32BIT ? 4 : 8;
24648 int ehrd_size;
24649 int ehcr_size;
24650 int save_align;
24651 int first_gp;
24652 HOST_WIDE_INT non_fixed_size;
24653 bool using_static_chain_p;
24655 if (reload_completed && info->reload_completed)
24656 return info;
24658 memset (info, 0, sizeof (*info));
24659 info->reload_completed = reload_completed;
24661 /* Select which calling sequence. */
24662 info->abi = DEFAULT_ABI;
24664 /* Calculate which registers need to be saved & save area size. */
24665 info->first_gp_reg_save = first_reg_to_save ();
24666 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24667 even if it currently looks like we won't. Reload may need it to
24668 get at a constant; if so, it will have already created a constant
24669 pool entry for it. */
24670 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24671 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24672 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24673 && crtl->uses_const_pool
24674 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24675 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24676 else
24677 first_gp = info->first_gp_reg_save;
24679 info->gp_size = reg_size * (32 - first_gp);
24681 info->first_fp_reg_save = first_fp_reg_to_save ();
24682 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24684 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24685 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24686 - info->first_altivec_reg_save);
24688 /* Does this function call anything? */
24689 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24691 /* Determine if we need to save the condition code registers. */
24692 if (save_reg_p (CR2_REGNO)
24693 || save_reg_p (CR3_REGNO)
24694 || save_reg_p (CR4_REGNO))
24696 info->cr_save_p = 1;
24697 if (DEFAULT_ABI == ABI_V4)
24698 info->cr_size = reg_size;
24701 /* If the current function calls __builtin_eh_return, then we need
24702 to allocate stack space for registers that will hold data for
24703 the exception handler. */
24704 if (crtl->calls_eh_return)
24706 unsigned int i;
24707 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24708 continue;
24710 ehrd_size = i * UNITS_PER_WORD;
24712 else
24713 ehrd_size = 0;
24715 /* In the ELFv2 ABI, we also need to allocate space for separate
24716 CR field save areas if the function calls __builtin_eh_return. */
24717 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24719 /* This hard-codes that we have three call-saved CR fields. */
24720 ehcr_size = 3 * reg_size;
24721 /* We do *not* use the regular CR save mechanism. */
24722 info->cr_save_p = 0;
24724 else
24725 ehcr_size = 0;
24727 /* Determine various sizes. */
24728 info->reg_size = reg_size;
24729 info->fixed_size = RS6000_SAVE_AREA;
24730 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24731 if (cfun->calls_alloca)
24732 info->parm_size =
24733 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24734 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24735 else
24736 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24737 TARGET_ALTIVEC ? 16 : 8);
24738 if (FRAME_GROWS_DOWNWARD)
24739 info->vars_size
24740 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24741 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24742 - (info->fixed_size + info->vars_size + info->parm_size);
24744 if (TARGET_ALTIVEC_ABI)
24745 info->vrsave_mask = compute_vrsave_mask ();
24747 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24748 info->vrsave_size = 4;
24750 compute_save_world_info (info);
24752 /* Calculate the offsets. */
24753 switch (DEFAULT_ABI)
24755 case ABI_NONE:
24756 default:
24757 gcc_unreachable ();
24759 case ABI_AIX:
24760 case ABI_ELFv2:
24761 case ABI_DARWIN:
24762 info->fp_save_offset = -info->fp_size;
24763 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24765 if (TARGET_ALTIVEC_ABI)
24767 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24769 /* Align stack so vector save area is on a quadword boundary.
24770 The padding goes above the vectors. */
24771 if (info->altivec_size != 0)
24772 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24774 info->altivec_save_offset = info->vrsave_save_offset
24775 - info->altivec_padding_size
24776 - info->altivec_size;
24777 gcc_assert (info->altivec_size == 0
24778 || info->altivec_save_offset % 16 == 0);
24780 /* Adjust for AltiVec case. */
24781 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24783 else
24784 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24786 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24787 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24788 info->lr_save_offset = 2*reg_size;
24789 break;
24791 case ABI_V4:
24792 info->fp_save_offset = -info->fp_size;
24793 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24794 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24796 if (TARGET_ALTIVEC_ABI)
24798 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24800 /* Align stack so vector save area is on a quadword boundary. */
24801 if (info->altivec_size != 0)
24802 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24804 info->altivec_save_offset = info->vrsave_save_offset
24805 - info->altivec_padding_size
24806 - info->altivec_size;
24808 /* Adjust for AltiVec case. */
24809 info->ehrd_offset = info->altivec_save_offset;
24811 else
24812 info->ehrd_offset = info->cr_save_offset;
24814 info->ehrd_offset -= ehrd_size;
24815 info->lr_save_offset = reg_size;
24818 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24819 info->save_size = RS6000_ALIGN (info->fp_size
24820 + info->gp_size
24821 + info->altivec_size
24822 + info->altivec_padding_size
24823 + ehrd_size
24824 + ehcr_size
24825 + info->cr_size
24826 + info->vrsave_size,
24827 save_align);
24829 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24831 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24832 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24834 /* Determine if we need to save the link register. */
24835 if (info->calls_p
24836 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24837 && crtl->profile
24838 && !TARGET_PROFILE_KERNEL)
24839 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24840 #ifdef TARGET_RELOCATABLE
24841 || (DEFAULT_ABI == ABI_V4
24842 && (TARGET_RELOCATABLE || flag_pic > 1)
24843 && !constant_pool_empty_p ())
24844 #endif
24845 || rs6000_ra_ever_killed ())
24846 info->lr_save_p = 1;
24848 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24849 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24850 && call_used_regs[STATIC_CHAIN_REGNUM]);
24851 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24853 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24854 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24855 || !(info->savres_strategy & SAVE_INLINE_VRS)
24856 || !(info->savres_strategy & REST_INLINE_GPRS)
24857 || !(info->savres_strategy & REST_INLINE_FPRS)
24858 || !(info->savres_strategy & REST_INLINE_VRS))
24859 info->lr_save_p = 1;
24861 if (info->lr_save_p)
24862 df_set_regs_ever_live (LR_REGNO, true);
24864 /* Determine if we need to allocate any stack frame:
24866 For AIX we need to push the stack if a frame pointer is needed
24867 (because the stack might be dynamically adjusted), if we are
24868 debugging, if we make calls, or if the sum of fp_save, gp_save,
24869 and local variables are more than the space needed to save all
24870 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24871 + 18*8 = 288 (GPR13 reserved).
24873 For V.4 we don't have the stack cushion that AIX uses, but assume
24874 that the debugger can handle stackless frames. */
24876 if (info->calls_p)
24877 info->push_p = 1;
24879 else if (DEFAULT_ABI == ABI_V4)
24880 info->push_p = non_fixed_size != 0;
24882 else if (frame_pointer_needed)
24883 info->push_p = 1;
24885 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24886 info->push_p = 1;
24888 else
24889 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24891 return info;
24894 static void
24895 debug_stack_info (rs6000_stack_t *info)
24897 const char *abi_string;
24899 if (! info)
24900 info = rs6000_stack_info ();
24902 fprintf (stderr, "\nStack information for function %s:\n",
24903 ((current_function_decl && DECL_NAME (current_function_decl))
24904 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24905 : "<unknown>"));
24907 switch (info->abi)
24909 default: abi_string = "Unknown"; break;
24910 case ABI_NONE: abi_string = "NONE"; break;
24911 case ABI_AIX: abi_string = "AIX"; break;
24912 case ABI_ELFv2: abi_string = "ELFv2"; break;
24913 case ABI_DARWIN: abi_string = "Darwin"; break;
24914 case ABI_V4: abi_string = "V.4"; break;
24917 fprintf (stderr, "\tABI = %5s\n", abi_string);
24919 if (TARGET_ALTIVEC_ABI)
24920 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24922 if (info->first_gp_reg_save != 32)
24923 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24925 if (info->first_fp_reg_save != 64)
24926 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24928 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24929 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24930 info->first_altivec_reg_save);
24932 if (info->lr_save_p)
24933 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24935 if (info->cr_save_p)
24936 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24938 if (info->vrsave_mask)
24939 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24941 if (info->push_p)
24942 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24944 if (info->calls_p)
24945 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24947 if (info->gp_size)
24948 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24950 if (info->fp_size)
24951 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24953 if (info->altivec_size)
24954 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24955 info->altivec_save_offset);
24957 if (info->vrsave_size)
24958 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24959 info->vrsave_save_offset);
24961 if (info->lr_save_p)
24962 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24964 if (info->cr_save_p)
24965 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24967 if (info->varargs_save_offset)
24968 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24970 if (info->total_size)
24971 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24972 info->total_size);
24974 if (info->vars_size)
24975 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24976 info->vars_size);
24978 if (info->parm_size)
24979 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24981 if (info->fixed_size)
24982 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24984 if (info->gp_size)
24985 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24987 if (info->fp_size)
24988 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24990 if (info->altivec_size)
24991 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24993 if (info->vrsave_size)
24994 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24996 if (info->altivec_padding_size)
24997 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24998 info->altivec_padding_size);
25000 if (info->cr_size)
25001 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
25003 if (info->save_size)
25004 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
25006 if (info->reg_size != 4)
25007 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
25009 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
25011 fprintf (stderr, "\n");
25015 rs6000_return_addr (int count, rtx frame)
25017 /* Currently we don't optimize very well between prolog and body
25018 code and for PIC code the code can be actually quite bad, so
25019 don't try to be too clever here. */
25020 if (count != 0
25021 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
25023 cfun->machine->ra_needs_full_frame = 1;
25025 return
25026 gen_rtx_MEM
25027 (Pmode,
25028 memory_address
25029 (Pmode,
25030 plus_constant (Pmode,
25031 copy_to_reg
25032 (gen_rtx_MEM (Pmode,
25033 memory_address (Pmode, frame))),
25034 RETURN_ADDRESS_OFFSET)));
25037 cfun->machine->ra_need_lr = 1;
25038 return get_hard_reg_initial_val (Pmode, LR_REGNO);
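/* What the expansion above serves, seen from the source level
   (illustrative only; GCC guarantees a meaningful result only for a
   COUNT of zero): */
#if 0
static void
model_return_addr_usage (void)
{
  /* COUNT == 0, non-PIC: satisfied from the LR value live on entry. */
  void *ra0 = __builtin_return_address (0);
  /* COUNT != 0 (or PIC): forces a full frame and chases the back
     chain, loading the slot at RETURN_ADDRESS_OFFSET. */
  void *ra1 = __builtin_return_address (1);
  (void) ra0;
  (void) ra1;
}
#endif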
25041 /* Say whether a function is a candidate for sibcall handling or not. */
25043 static bool
25044 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25046 tree fntype;
25048 if (decl)
25049 fntype = TREE_TYPE (decl);
25050 else
25051 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25053 /* We can't do it if the called function has more vector parameters
25054 than the current function; there's nowhere to put the VRsave code. */
25055 if (TARGET_ALTIVEC_ABI
25056 && TARGET_ALTIVEC_VRSAVE
25057 && !(decl && decl == current_function_decl))
25059 function_args_iterator args_iter;
25060 tree type;
25061 int nvreg = 0;
25063 /* Functions with vector parameters are required to have a
25064 prototype, so the argument type info must be available
25065 here. */
25066 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
25067 if (TREE_CODE (type) == VECTOR_TYPE
25068 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25069 nvreg++;
25071 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
25072 if (TREE_CODE (type) == VECTOR_TYPE
25073 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25074 nvreg--;
25076 if (nvreg > 0)
25077 return false;
25080 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25081 functions, because the callee may have a different TOC pointer to
25082 the caller and there's no way to ensure we restore the TOC when
25083 we return. With the secure-plt SYSV ABI we can't make non-local
25084 calls when -fpic/PIC because the plt call stubs use r30. */
25085 if (DEFAULT_ABI == ABI_DARWIN
25086 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25087 && decl
25088 && !DECL_EXTERNAL (decl)
25089 && !DECL_WEAK (decl)
25090 && (*targetm.binds_local_p) (decl))
25091 || (DEFAULT_ABI == ABI_V4
25092 && (!TARGET_SECURE_PLT
25093 || !flag_pic
25094 || (decl
25095 && (*targetm.binds_local_p) (decl)))))
25097 tree attr_list = TYPE_ATTRIBUTES (fntype);
25099 if (!lookup_attribute ("longcall", attr_list)
25100 || lookup_attribute ("shortcall", attr_list))
25101 return true;
25104 return false;
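/* A hypothetical illustration of the TOC restriction above for
   AIX/ELFv2: a call to a function that binds locally can be a sibcall,
   while a call to an external function cannot, because the callee may
   run with a different TOC pointer. */
#if 0
static int local_fn (int);
extern int external_fn (int);

static int
tail_ok (int x)
{
  return local_fn (x);          /* same TOC: sibcall allowed */
}

static int
tail_not_ok (int x)
{
  return external_fn (x);       /* TOC may differ: full call needed */
}
#endif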
25107 static int
25108 rs6000_ra_ever_killed (void)
25110 rtx_insn *top;
25111 rtx reg;
25112 rtx_insn *insn;
25114 if (cfun->is_thunk)
25115 return 0;
25117 if (cfun->machine->lr_save_state)
25118 return cfun->machine->lr_save_state - 1;
25120 /* regs_ever_live has LR marked as used if any sibcalls are present,
25121 but this should not force saving and restoring in the
25122 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25123 clobbers LR, so that is inappropriate. */
25125 /* Also, the prologue can generate a store into LR that
25126 doesn't really count, like this:
25128 move LR->R0
25129 bcl to set PIC register
25130 move LR->R31
25131 move R0->LR
25133 When we're called from the epilogue, we need to avoid counting
25134 this as a store. */
25136 push_topmost_sequence ();
25137 top = get_insns ();
25138 pop_topmost_sequence ();
25139 reg = gen_rtx_REG (Pmode, LR_REGNO);
25141 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25143 if (INSN_P (insn))
25145 if (CALL_P (insn))
25147 if (!SIBLING_CALL_P (insn))
25148 return 1;
25150 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25151 return 1;
25152 else if (set_of (reg, insn) != NULL_RTX
25153 && !prologue_epilogue_contains (insn))
25154 return 1;
25157 return 0;
25160 /* Emit instructions needed to load the TOC register.
25161 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
25162 a constant pool; or for SVR4 -fpic. */
25164 void
25165 rs6000_emit_load_toc_table (int fromprolog)
25167 rtx dest;
25168 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25170 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25172 char buf[30];
25173 rtx lab, tmp1, tmp2, got;
25175 lab = gen_label_rtx ();
25176 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25177 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25178 if (flag_pic == 2)
25180 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25181 need_toc_init = 1;
25183 else
25184 got = rs6000_got_sym ();
25185 tmp1 = tmp2 = dest;
25186 if (!fromprolog)
25188 tmp1 = gen_reg_rtx (Pmode);
25189 tmp2 = gen_reg_rtx (Pmode);
25191 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25192 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25193 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25194 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25196 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25198 emit_insn (gen_load_toc_v4_pic_si ());
25199 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25201 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25203 char buf[30];
25204 rtx temp0 = (fromprolog
25205 ? gen_rtx_REG (Pmode, 0)
25206 : gen_reg_rtx (Pmode));
25208 if (fromprolog)
25210 rtx symF, symL;
25212 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25213 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25215 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25216 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25218 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25219 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25220 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25222 else
25224 rtx tocsym, lab;
25226 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25227 need_toc_init = 1;
25228 lab = gen_label_rtx ();
25229 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25230 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25231 if (TARGET_LINK_STACK)
25232 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25233 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25235 emit_insn (gen_addsi3 (dest, temp0, dest));
25237 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25239 /* This is for AIX code running in non-PIC ELF32. */
25240 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25242 need_toc_init = 1;
25243 emit_insn (gen_elf_high (dest, realsym));
25244 emit_insn (gen_elf_low (dest, dest, realsym));
25246 else
25248 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25250 if (TARGET_32BIT)
25251 emit_insn (gen_load_toc_aix_si (dest));
25252 else
25253 emit_insn (gen_load_toc_aix_di (dest));
25257 /* Emit instructions to restore the link register after determining where
25258 its value has been stored. */
25260 void
25261 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25263 rs6000_stack_t *info = rs6000_stack_info ();
25264 rtx operands[2];
25266 operands[0] = source;
25267 operands[1] = scratch;
25269 if (info->lr_save_p)
25271 rtx frame_rtx = stack_pointer_rtx;
25272 HOST_WIDE_INT sp_offset = 0;
25273 rtx tmp;
25275 if (frame_pointer_needed
25276 || cfun->calls_alloca
25277 || info->total_size > 32767)
25279 tmp = gen_frame_mem (Pmode, frame_rtx);
25280 emit_move_insn (operands[1], tmp);
25281 frame_rtx = operands[1];
25283 else if (info->push_p)
25284 sp_offset = info->total_size;
25286 tmp = plus_constant (Pmode, frame_rtx,
25287 info->lr_save_offset + sp_offset);
25288 tmp = gen_frame_mem (Pmode, tmp);
25289 emit_move_insn (tmp, operands[0]);
25291 else
25292 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25294 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25295 state of lr_save_p so any change from here on would be a bug. In
25296 particular, stop rs6000_ra_ever_killed from considering the SET
25297 of lr we may have added just above. */
25298 cfun->machine->lr_save_state = info->lr_save_p + 1;
25301 static GTY(()) alias_set_type set = -1;
25303 alias_set_type
25304 get_TOC_alias_set (void)
25306 if (set == -1)
25307 set = new_alias_set ();
25308 return set;
25311 /* This returns nonzero if the current function uses the TOC. This is
25312 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25313 is generated by the ABI_V4 load_toc_* patterns.
25314 Return 2 instead of 1 if the load_toc_* pattern is in the function
25315 partition that doesn't start the function. */
25316 #if TARGET_ELF
25317 static int
25318 uses_TOC (void)
25320 rtx_insn *insn;
25321 int ret = 1;
25323 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25325 if (INSN_P (insn))
25327 rtx pat = PATTERN (insn);
25328 int i;
25330 if (GET_CODE (pat) == PARALLEL)
25331 for (i = 0; i < XVECLEN (pat, 0); i++)
25333 rtx sub = XVECEXP (pat, 0, i);
25334 if (GET_CODE (sub) == USE)
25336 sub = XEXP (sub, 0);
25337 if (GET_CODE (sub) == UNSPEC
25338 && XINT (sub, 1) == UNSPEC_TOC)
25339 return ret;
25343 else if (crtl->has_bb_partition
25344 && NOTE_P (insn)
25345 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25346 ret = 2;
25348 return 0;
25350 #endif
25353 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25355 rtx tocrel, tocreg, hi;
25357 if (TARGET_DEBUG_ADDR)
25359 if (GET_CODE (symbol) == SYMBOL_REF)
25360 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25361 XSTR (symbol, 0));
25362 else
25364 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25365 GET_RTX_NAME (GET_CODE (symbol)));
25366 debug_rtx (symbol);
25370 if (!can_create_pseudo_p ())
25371 df_set_regs_ever_live (TOC_REGISTER, true);
25373 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25374 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25375 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25376 return tocrel;
25378 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25379 if (largetoc_reg != NULL)
25381 emit_move_insn (largetoc_reg, hi);
25382 hi = largetoc_reg;
25384 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25387 /* Issue assembly directives that create a reference to the given DWARF
25388 FRAME_TABLE_LABEL from the current function section. */
25389 void
25390 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25392 fprintf (asm_out_file, "\t.ref %s\n",
25393 (* targetm.strip_name_encoding) (frame_table_label));
25396 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25397 and the change to the stack pointer. */
25399 static void
25400 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25402 rtvec p;
25403 int i;
25404 rtx regs[3];
25406 i = 0;
25407 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25408 if (hard_frame_needed)
25409 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25410 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25411 || (hard_frame_needed
25412 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25413 regs[i++] = fp;
25415 p = rtvec_alloc (i);
25416 while (--i >= 0)
25418 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25419 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25422 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25425 /* Emit the correct code for allocating stack space, as insns.
25426 If COPY_REG, make sure a copy of the old frame is left there.
25427 The generated code may use hard register 0 as a temporary. */
25429 static rtx_insn *
25430 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25432 rtx_insn *insn;
25433 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25434 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25435 rtx todec = gen_int_mode (-size, Pmode);
25436 rtx par, set, mem;
25438 if (INTVAL (todec) != -size)
25440 warning (0, "stack frame too large");
25441 emit_insn (gen_trap ());
25442 return 0;
25445 if (crtl->limit_stack)
25447 if (REG_P (stack_limit_rtx)
25448 && REGNO (stack_limit_rtx) > 1
25449 && REGNO (stack_limit_rtx) <= 31)
25451 rtx_insn *insn
25452 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25453 gcc_assert (insn);
25454 emit_insn (insn);
25455 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25457 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25458 && TARGET_32BIT
25459 && DEFAULT_ABI == ABI_V4
25460 && !flag_pic)
25462 rtx toload = gen_rtx_CONST (VOIDmode,
25463 gen_rtx_PLUS (Pmode,
25464 stack_limit_rtx,
25465 GEN_INT (size)));
25467 emit_insn (gen_elf_high (tmp_reg, toload));
25468 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25469 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25470 const0_rtx));
25472 else
25473 warning (0, "stack limit expression is not supported");
25476 if (copy_reg)
25478 if (copy_off != 0)
25479 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25480 else
25481 emit_move_insn (copy_reg, stack_reg);
25484 if (size > 32767)
25486 /* Need a note here so that try_split doesn't get confused. */
25487 if (get_last_insn () == NULL_RTX)
25488 emit_note (NOTE_INSN_DELETED);
25489 insn = emit_move_insn (tmp_reg, todec);
25490 try_split (PATTERN (insn), insn, 0);
25491 todec = tmp_reg;
25494 insn = emit_insn (TARGET_32BIT
25495 ? gen_movsi_update_stack (stack_reg, stack_reg,
25496 todec, stack_reg)
25497 : gen_movdi_di_update_stack (stack_reg, stack_reg,
25498 todec, stack_reg));
25499 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25500 it now and set the alias set/attributes. The above gen_*_update
25501 calls will generate a PARALLEL with the MEM set being the first
25502 operation. */
25503 par = PATTERN (insn);
25504 gcc_assert (GET_CODE (par) == PARALLEL);
25505 set = XVECEXP (par, 0, 0);
25506 gcc_assert (GET_CODE (set) == SET);
25507 mem = SET_DEST (set);
25508 gcc_assert (MEM_P (mem));
25509 MEM_NOTRAP_P (mem) = 1;
25510 set_mem_alias_set (mem, get_frame_alias_set ());
25512 RTX_FRAME_RELATED_P (insn) = 1;
25513 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25514 gen_rtx_SET (stack_reg, gen_rtx_PLUS (Pmode, stack_reg,
25515 GEN_INT (-size))));
25516 return insn;
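/* A compiled-out sketch (not part of GCC) of the size check above:
   stwu/stdu take a 16-bit signed displacement, so any frame whose
   negated size cannot be encoded there must first materialize -size
   in a temporary register.  The sketch_* helper names are invented.  */
#if 0
#include <stdbool.h>
#include <stdint.h>

/* True if VAL fits the 16-bit signed displacement of stwu/stdu.  */
static bool
sketch_fits_si16 (int64_t val)
{
  return val >= -32768 && val <= 32767;
}

/* The large-frame path above is taken for size > 32767; this is
   (conservatively) the case where -size may not be encodable, even
   though -32768 itself would still fit the displacement field.  */
static bool
sketch_needs_temp (int64_t size)
{
  return size > 32767;
}
#endif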
25519 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25521 #if PROBE_INTERVAL > 32768
25522 #error Cannot use indexed addressing mode for stack probing
25523 #endif
25525 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25526 inclusive. These are offsets from the current stack pointer. */
25528 static void
25529 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25531 /* See if we have a constant small number of probes to generate. If so,
25532 that's the easy case. */
25533 if (first + size <= 32768)
25535 HOST_WIDE_INT i;
25537 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25538 it exceeds SIZE. If only one probe is needed, this will not
25539 generate any code. Then probe at FIRST + SIZE. */
25540 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25541 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25542 -(first + i)));
25544 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25545 -(first + size)));
25548 /* Otherwise, do the same as above, but in a loop. Note that we must be
25549 extra careful with variables wrapping around because we might be at
25550 the very top (or the very bottom) of the address space and we have
25551 to be able to handle this case properly; in particular, we use an
25552 equality test for the loop condition. */
25553 else
25555 HOST_WIDE_INT rounded_size;
25556 rtx r12 = gen_rtx_REG (Pmode, 12);
25557 rtx r0 = gen_rtx_REG (Pmode, 0);
25559 /* Sanity check for the addressing mode we're going to use. */
25560 gcc_assert (first <= 32768);
25562 /* Step 1: round SIZE to the previous multiple of the interval. */
25564 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25567 /* Step 2: compute initial and final value of the loop counter. */
25569 /* TEST_ADDR = SP + FIRST. */
25570 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25571 -first)));
25573 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25574 if (rounded_size > 32768)
25576 emit_move_insn (r0, GEN_INT (-rounded_size));
25577 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25579 else
25580 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25581 -rounded_size)));
25584 /* Step 3: the loop
25586 do
25587 {
25588 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25589 probe at TEST_ADDR
25590 }
25591 while (TEST_ADDR != LAST_ADDR)
25593 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25594 until it is equal to ROUNDED_SIZE. */
25596 if (TARGET_64BIT)
25597 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
25598 else
25599 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
25602 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25603 that SIZE is equal to ROUNDED_SIZE. */
25605 if (size != rounded_size)
25606 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
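/* A compiled-out sketch (not part of GCC) of the probe placement just
   implemented: probes land at FIRST + N * PROBE_INTERVAL, and for the
   loop case SIZE is first rounded down to a multiple of the interval,
   with a tail probe at FIRST + SIZE if anything remains.  The
   4096-byte interval and the function name are assumptions here.  */
#if 0
#include <stdio.h>

#define SKETCH_INTERVAL 4096

static void
sketch_probe_offsets (long long first, long long size)
{
  long long rounded = size - (size % SKETCH_INTERVAL);	/* ROUND_DOWN.  */
  long long i;
  for (i = SKETCH_INTERVAL; i <= rounded; i += SKETCH_INTERVAL)
    printf ("probe at sp - %lld\n", first + i);
  if (size != rounded)
    printf ("tail probe at sp - %lld\n", first + size);
}
#endif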
25610 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25611 absolute addresses. */
25613 const char *
25614 output_probe_stack_range (rtx reg1, rtx reg2)
25616 static int labelno = 0;
25617 char loop_lab[32];
25618 rtx xops[2];
25620 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25622 /* Loop. */
25623 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25625 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25626 xops[0] = reg1;
25627 xops[1] = GEN_INT (-PROBE_INTERVAL);
25628 output_asm_insn ("addi %0,%0,%1", xops);
25630 /* Probe at TEST_ADDR. */
25631 xops[1] = gen_rtx_REG (Pmode, 0);
25632 output_asm_insn ("stw %1,0(%0)", xops);
25634 /* Test if TEST_ADDR == LAST_ADDR. */
25635 xops[1] = reg2;
25636 if (TARGET_64BIT)
25637 output_asm_insn ("cmpd 0,%0,%1", xops);
25638 else
25639 output_asm_insn ("cmpw 0,%0,%1", xops);
25641 /* Branch. */
25642 fputs ("\tbne 0,", asm_out_file);
25643 assemble_name_raw (asm_out_file, loop_lab);
25644 fputc ('\n', asm_out_file);
25646 return "";
25649 /* This function is called when rs6000_frame_related is processing
25650 SETs within a PARALLEL, and returns whether the REGNO save ought to
25651 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25652 for out-of-line register save functions, store multiple, and the
25653 Darwin world_save. They may contain registers that don't really
25654 need saving. */
25656 static bool
25657 interesting_frame_related_regno (unsigned int regno)
25659 /* Apparent saves of r0 are actually saving LR. It doesn't make
25660 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25661 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25662 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25663 as frame related. */
25664 if (regno == 0)
25665 return true;
25666 /* If we see CR2 then we are here on a Darwin world save. Saves of
25667 CR2 signify the whole CR is being saved. This is a long-standing
25668 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25669 that CR needs to be saved. */
25670 if (regno == CR2_REGNO)
25671 return true;
25672 /* Omit frame info for any user-defined global regs. If frame info
25673 is supplied for them, frame unwinding will restore a user reg.
25674 Also omit frame info for any reg we don't need to save, as that
25675 bloats frame info and can cause problems with shrink wrapping.
25676 Since global regs won't be seen as needing to be saved, both of
25677 these conditions are covered by save_reg_p. */
25678 return save_reg_p (regno);
25681 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25682 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25683 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25684 deduce these equivalences by itself so it wasn't necessary to hold
25685 its hand so much. Don't be tempted to always supply d2_f_d_e with
25686 the actual cfa register, i.e. r31 when we are using a hard frame
25687 pointer. That fails when saving regs off r1, and sched moves the
25688 r31 setup past the reg saves. */
25690 static rtx_insn *
25691 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25692 rtx reg2, rtx repl2)
25694 rtx repl;
25696 if (REGNO (reg) == STACK_POINTER_REGNUM)
25698 gcc_checking_assert (val == 0);
25699 repl = NULL_RTX;
25701 else
25702 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25703 GEN_INT (val));
25705 rtx pat = PATTERN (insn);
25706 if (!repl && !reg2)
25708 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25709 if (GET_CODE (pat) == PARALLEL)
25710 for (int i = 0; i < XVECLEN (pat, 0); i++)
25711 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25713 rtx set = XVECEXP (pat, 0, i);
25715 if (!REG_P (SET_SRC (set))
25716 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25717 RTX_FRAME_RELATED_P (set) = 1;
25719 RTX_FRAME_RELATED_P (insn) = 1;
25720 return insn;
25723 /* We expect that 'pat' is either a SET or a PARALLEL containing
25724 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25725 are important so they all have to be marked RTX_FRAME_RELATED_P.
25726 Call simplify_replace_rtx on the SETs rather than the whole insn
25727 so as to leave the other stuff alone (for example USE of r12). */
25729 set_used_flags (pat);
25730 if (GET_CODE (pat) == SET)
25732 if (repl)
25733 pat = simplify_replace_rtx (pat, reg, repl);
25734 if (reg2)
25735 pat = simplify_replace_rtx (pat, reg2, repl2);
25737 else if (GET_CODE (pat) == PARALLEL)
25739 pat = shallow_copy_rtx (pat);
25740 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25742 for (int i = 0; i < XVECLEN (pat, 0); i++)
25743 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25745 rtx set = XVECEXP (pat, 0, i);
25747 if (repl)
25748 set = simplify_replace_rtx (set, reg, repl);
25749 if (reg2)
25750 set = simplify_replace_rtx (set, reg2, repl2);
25751 XVECEXP (pat, 0, i) = set;
25753 if (!REG_P (SET_SRC (set))
25754 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25755 RTX_FRAME_RELATED_P (set) = 1;
25758 else
25759 gcc_unreachable ();
25761 RTX_FRAME_RELATED_P (insn) = 1;
25762 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25764 return insn;
25767 /* Returns an insn that has a vrsave set operation with the
25768 appropriate CLOBBERs. */
25770 static rtx
25771 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25773 int nclobs, i;
25774 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25775 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25777 clobs[0]
25778 = gen_rtx_SET (vrsave,
25779 gen_rtx_UNSPEC_VOLATILE (SImode,
25780 gen_rtvec (2, reg, vrsave),
25781 UNSPECV_SET_VRSAVE));
25783 nclobs = 1;
25785 /* We need to clobber the registers in the mask so the scheduler
25786 does not move sets to VRSAVE before sets of AltiVec registers.
25788 However, if the function receives nonlocal gotos, reload will set
25789 all call saved registers live. We will end up with:
25791 (set (reg 999) (mem))
25792 (parallel [ (set (reg vrsave) (unspec blah))
25793 (clobber (reg 999))])
25795 The clobber will cause the store into reg 999 to be dead, and
25796 flow will attempt to delete an epilogue insn. In this case, we
25797 need an unspec use/set of the register. */
25799 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25800 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25802 if (!epiloguep || call_used_regs [i])
25803 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25804 gen_rtx_REG (V4SImode, i));
25805 else
25807 rtx reg = gen_rtx_REG (V4SImode, i);
25809 clobs[nclobs++]
25810 = gen_rtx_SET (reg,
25811 gen_rtx_UNSPEC (V4SImode,
25812 gen_rtvec (1, reg), 27));
25816 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25818 for (i = 0; i < nclobs; ++i)
25819 XVECEXP (insn, 0, i) = clobs[i];
25821 return insn;
25824 static rtx
25825 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25827 rtx addr, mem;
25829 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25830 mem = gen_frame_mem (GET_MODE (reg), addr);
25831 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25834 static rtx
25835 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25837 return gen_frame_set (reg, frame_reg, offset, false);
25840 static rtx
25841 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25843 return gen_frame_set (reg, frame_reg, offset, true);
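/* An illustrative note (not upstream text): on a 64-bit target where
   Pmode is DImode, gen_frame_store (r30, sp, 16) builds
   (set (mem:DI (plus:DI (reg:DI 1) (const_int 16))) (reg:DI 30)),
   and gen_frame_load simply swaps the two SET operands. */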
25846 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25847 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25849 static rtx_insn *
25850 emit_frame_save (rtx frame_reg, machine_mode mode,
25851 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25853 rtx reg;
25855 /* AltiVec vector modes need register indexed addressing unless VSX handles them; assert no such case reaches here. */
25856 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25857 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25859 reg = gen_rtx_REG (mode, regno);
25860 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25861 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25862 NULL_RTX, NULL_RTX);
25865 /* Emit an offset memory reference suitable for a frame store, while
25866 converting to a valid addressing mode. */
25868 static rtx
25869 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25871 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25874 #ifndef TARGET_FIX_AND_CONTINUE
25875 #define TARGET_FIX_AND_CONTINUE 0
25876 #endif
25878 /* The first such register is really GPR 13 or 14, FPR 14, or VR 20; we need the smallest of these. */
25879 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25880 #define LAST_SAVRES_REGISTER 31
25881 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25883 enum {
25884 SAVRES_LR = 0x1,
25885 SAVRES_SAVE = 0x2,
25886 SAVRES_REG = 0x0c,
25887 SAVRES_GPR = 0,
25888 SAVRES_FPR = 4,
25889 SAVRES_VR = 8
25890 };
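/* A compiled-out sketch (not part of GCC) of how a SEL value built from
   the flags above decomposes: SAVRES_SAVE and SAVRES_LR are independent
   flag bits, and the two-bit SAVRES_REG field selects the register
   class.  The helper name is invented.  */
#if 0
static void
sketch_decode_savres (int sel, int *is_save, int *with_lr, int *reg_class)
{
  *is_save = (sel & SAVRES_SAVE) != 0;
  *with_lr = (sel & SAVRES_LR) != 0;
  *reg_class = sel & SAVRES_REG;  /* SAVRES_GPR, SAVRES_FPR or SAVRES_VR.  */
}
/* E.g. (SAVRES_SAVE | SAVRES_FPR | SAVRES_LR) == 0x7 selects the FPR
   save routine that also stores LR.  */
#endif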
25892 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
25894 /* Temporary holding space for an out-of-line register save/restore
25895 routine name. */
25896 static char savres_routine_name[30];
25898 /* Return the name for an out-of-line register save/restore routine.
25899 SEL selects the register class (GPR/FPR/VR), save vs. restore, and LR handling. */
25901 static char *
25902 rs6000_savres_routine_name (int regno, int sel)
25904 const char *prefix = "";
25905 const char *suffix = "";
25907 /* Different targets are supposed to define
25908 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25909 routine name could be defined with:
25911 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25913 This is a nice idea in theory, but in reality, things are
25914 complicated in several ways:
25916 - ELF targets have save/restore routines for GPRs.
25918 - PPC64 ELF targets have routines for save/restore of GPRs that
25919 differ in what they do with the link register, so having a set
25920 prefix doesn't work. (We only use one of the save routines at
25921 the moment, though.)
25923 - PPC32 elf targets have "exit" versions of the restore routines
25924 that restore the link register and can save some extra space.
25925 These require an extra suffix. (There are also "tail" versions
25926 of the restore routines and "GOT" versions of the save routines,
25927 but we don't generate those at present. Same problems apply,
25928 though.)
25930 We deal with all this by synthesizing our own prefix/suffix and
25931 using that for the simple sprintf call shown above. */
25932 if (DEFAULT_ABI == ABI_V4)
25934 if (TARGET_64BIT)
25935 goto aix_names;
25937 if ((sel & SAVRES_REG) == SAVRES_GPR)
25938 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25939 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25940 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25941 else if ((sel & SAVRES_REG) == SAVRES_VR)
25942 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25943 else
25944 abort ();
25946 if ((sel & SAVRES_LR))
25947 suffix = "_x";
25949 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25951 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25952 /* No out-of-line save/restore routines for GPRs on AIX. */
25953 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25954 #endif
25956 aix_names:
25957 if ((sel & SAVRES_REG) == SAVRES_GPR)
25958 prefix = ((sel & SAVRES_SAVE)
25959 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25960 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25961 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25963 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25964 if ((sel & SAVRES_LR))
25965 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
25966 else
25967 #endif
25969 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
25970 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
25973 else if ((sel & SAVRES_REG) == SAVRES_VR)
25974 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25975 else
25976 abort ();
25979 if (DEFAULT_ABI == ABI_DARWIN)
25981 /* The Darwin approach is (slightly) different, in order to be
25982 compatible with code generated by the system toolchain. There is a
25983 single symbol for the start of the save sequence, and the code here
25984 embeds an offset into that code on the basis of the first register
25985 to be saved. */
25986 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
25987 if ((sel & SAVRES_REG) == SAVRES_GPR)
25988 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
25989 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
25990 (regno - 13) * 4, prefix, regno);
25991 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25992 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
25993 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
25994 else if ((sel & SAVRES_REG) == SAVRES_VR)
25995 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
25996 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
25997 else
25998 abort ();
26000 else
26001 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26003 return savres_routine_name;
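/* A compiled-out sketch (not part of GCC) of the prefix/regno/suffix
   composition described above, e.g. "_restgpr_" + 29 + "_x" giving
   "_restgpr_29_x" for a PPC32 ELF "exit" GPR restore.  The function
   name is invented.  */
#if 0
#include <stdio.h>

static void
sketch_savres_name (char *buf, size_t len,
		    const char *prefix, int regno, const char *suffix)
{
  snprintf (buf, len, "%s%d%s", prefix, regno, suffix);
}
#endif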
26006 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26007 SEL selects the register class and whether LR is also handled. */
26009 static rtx
26010 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26012 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26013 ? info->first_gp_reg_save
26014 : (sel & SAVRES_REG) == SAVRES_FPR
26015 ? info->first_fp_reg_save - 32
26016 : (sel & SAVRES_REG) == SAVRES_VR
26017 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26018 : -1);
26019 rtx sym;
26020 int select = sel;
26022 /* Don't generate bogus routine names. */
26023 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26024 && regno <= LAST_SAVRES_REGISTER
26025 && select >= 0 && select <= 12);
26027 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26029 if (sym == NULL)
26031 char *name;
26033 name = rs6000_savres_routine_name (regno, sel);
26035 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26036 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26037 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26040 return sym;
26043 /* Emit a sequence of insns, including a stack tie if needed, for
26044 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26045 reset the stack pointer, but move the base of the frame into
26046 reg UPDT_REGNO for use by out-of-line register restore routines. */
26048 static rtx
26049 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26050 unsigned updt_regno)
26052 /* If the frame base is already in UPDT_REGNO with no offset, there is nothing to do. */
26053 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26054 return NULL_RTX;
26056 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26058 /* This blockage is needed so that sched doesn't decide to move
26059 the sp change before the register restores. */
26060 if (DEFAULT_ABI == ABI_V4)
26061 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26062 GEN_INT (frame_off)));
26064 /* If we are restoring registers out-of-line, we will be using the
26065 "exit" variants of the restore routines, which will reset the
26066 stack for us. But we do need to point updt_reg into the
26067 right place for those routines. */
26068 if (frame_off != 0)
26069 return emit_insn (gen_add3_insn (updt_reg_rtx,
26070 frame_reg_rtx, GEN_INT (frame_off)));
26071 else
26072 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26074 return NULL_RTX;
26077 /* Return the register number used as a pointer by out-of-line
26078 save/restore functions. */
26080 static inline unsigned
26081 ptr_regno_for_savres (int sel)
26083 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26084 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26085 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26088 /* Construct a parallel rtx describing the effect of a call to an
26089 out-of-line register save/restore routine, and emit the insn
26090 or jump_insn as appropriate. */
26092 static rtx_insn *
26093 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26094 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26095 machine_mode reg_mode, int sel)
26097 int i;
26098 int offset, start_reg, end_reg, n_regs, use_reg;
26099 int reg_size = GET_MODE_SIZE (reg_mode);
26100 rtx sym;
26101 rtvec p;
26102 rtx par;
26103 rtx_insn *insn;
26105 offset = 0;
26106 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26107 ? info->first_gp_reg_save
26108 : (sel & SAVRES_REG) == SAVRES_FPR
26109 ? info->first_fp_reg_save
26110 : (sel & SAVRES_REG) == SAVRES_VR
26111 ? info->first_altivec_reg_save
26112 : -1);
26113 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26114 ? 32
26115 : (sel & SAVRES_REG) == SAVRES_FPR
26116 ? 64
26117 : (sel & SAVRES_REG) == SAVRES_VR
26118 ? LAST_ALTIVEC_REGNO + 1
26119 : -1);
26120 n_regs = end_reg - start_reg;
26121 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26122 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26123 + n_regs);
26125 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26126 RTVEC_ELT (p, offset++) = ret_rtx;
26128 RTVEC_ELT (p, offset++)
26129 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
26131 sym = rs6000_savres_routine_sym (info, sel);
26132 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26134 use_reg = ptr_regno_for_savres (sel);
26135 if ((sel & SAVRES_REG) == SAVRES_VR)
26137 /* Vector regs are saved/restored using [reg+reg] addressing. */
26138 RTVEC_ELT (p, offset++)
26139 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26140 RTVEC_ELT (p, offset++)
26141 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26143 else
26144 RTVEC_ELT (p, offset++)
26145 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26147 for (i = 0; i < end_reg - start_reg; i++)
26148 RTVEC_ELT (p, i + offset)
26149 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26150 frame_reg_rtx, save_area_offset + reg_size * i,
26151 (sel & SAVRES_SAVE) != 0);
26153 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26154 RTVEC_ELT (p, i + offset)
26155 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26157 par = gen_rtx_PARALLEL (VOIDmode, p);
26159 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26161 insn = emit_jump_insn (par);
26162 JUMP_LABEL (insn) = ret_rtx;
26164 else
26165 insn = emit_insn (par);
26166 return insn;
26169 /* Emit prologue code to store CR fields that need to be saved into REG. This
26170 function should only be called when moving the non-volatile CRs to REG;
26171 it is not a general-purpose routine to move the entire set of CRs to REG.
26172 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26173 volatile CRs. */
26175 static void
26176 rs6000_emit_prologue_move_from_cr (rtx reg)
26178 /* Only the ELFv2 ABI allows storing only selected fields. */
26179 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26181 int i, cr_reg[8], count = 0;
26183 /* Collect CR fields that must be saved. */
26184 for (i = 0; i < 8; i++)
26185 if (save_reg_p (CR0_REGNO + i))
26186 cr_reg[count++] = i;
26188 /* If it's just a single one, use mfcrf. */
26189 if (count == 1)
26191 rtvec p = rtvec_alloc (1);
26192 rtvec r = rtvec_alloc (2);
26193 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26194 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26195 RTVEC_ELT (p, 0)
26196 = gen_rtx_SET (reg,
26197 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26199 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26200 return;
26203 /* ??? It might be better to handle count == 2 / 3 cases here
26204 as well, using logical operations to combine the values. */
26207 emit_insn (gen_prologue_movesi_from_cr (reg));
26210 /* Return whether the split-stack arg pointer (r12) is used. */
26212 static bool
26213 split_stack_arg_pointer_used_p (void)
26215 /* If the pseudo holding the arg pointer is no longer a pseudo,
26216 then the arg pointer is used. */
26217 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26218 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26219 || (REGNO (cfun->machine->split_stack_arg_pointer)
26220 < FIRST_PSEUDO_REGISTER)))
26221 return true;
26223 /* Unfortunately we also need to do some code scanning, since
26224 r12 may have been substituted for the pseudo. */
26225 rtx_insn *insn;
26226 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26227 FOR_BB_INSNS (bb, insn)
26228 if (NONDEBUG_INSN_P (insn))
26230 /* A call destroys r12. */
26231 if (CALL_P (insn))
26232 return false;
26234 df_ref use;
26235 FOR_EACH_INSN_USE (use, insn)
26237 rtx x = DF_REF_REG (use);
26238 if (REG_P (x) && REGNO (x) == 12)
26239 return true;
26241 df_ref def;
26242 FOR_EACH_INSN_DEF (def, insn)
26244 rtx x = DF_REF_REG (def);
26245 if (REG_P (x) && REGNO (x) == 12)
26246 return false;
26249 return bitmap_bit_p (DF_LR_OUT (bb), 12);
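/* A compiled-out sketch (not part of GCC) of the scan above: r12 counts
   as used only if a read of it is seen before any insn that overwrites
   it (calls clobber r12); with no reference at all, the answer falls
   back to the DF live-out information.  The event encoding here is
   invented.  */
#if 0
#include <stdbool.h>

enum sketch_ev { SK_USE_R12, SK_DEF_R12, SK_CALL, SK_OTHER };

static bool
sketch_r12_used_p (const enum sketch_ev *ev, int n, bool live_out)
{
  for (int i = 0; i < n; i++)
    switch (ev[i])
      {
      case SK_USE_R12:
	return true;
      case SK_DEF_R12:
      case SK_CALL:
	return false;
      default:
	break;
      }
  return live_out;
}
#endif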
26252 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26254 static bool
26255 rs6000_global_entry_point_needed_p (void)
26257 /* Only needed for the ELFv2 ABI. */
26258 if (DEFAULT_ABI != ABI_ELFv2)
26259 return false;
26261 /* With -msingle-pic-base, we assume the whole program shares the same
26262 TOC, so no global entry point prologues are needed anywhere. */
26263 if (TARGET_SINGLE_PIC_BASE)
26264 return false;
26266 /* Ensure we have a global entry point for thunks. ??? We could
26267 avoid that if the target routine doesn't need a global entry point,
26268 but we do not know whether this is the case at this point. */
26269 if (cfun->is_thunk)
26270 return true;
26272 /* For regular functions, rs6000_emit_prologue sets this flag if the
26273 routine ever uses the TOC pointer. */
26274 return cfun->machine->r2_setup_needed;
26277 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26278 static sbitmap
26279 rs6000_get_separate_components (void)
26281 rs6000_stack_t *info = rs6000_stack_info ();
26283 if (WORLD_SAVE_P (info))
26284 return NULL;
26286 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26287 && !(info->savres_strategy & REST_MULTIPLE));
26289 /* Component 0 is the save/restore of LR (done via GPR0).
26290 Components 13..31 are the save/restore of GPR13..GPR31.
26291 Components 46..63 are the save/restore of FPR14..FPR31. */
26293 cfun->machine->n_components = 64;
26295 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26296 bitmap_clear (components);
26298 int reg_size = TARGET_32BIT ? 4 : 8;
26299 int fp_reg_size = 8;
26301 /* The GPRs we need saved to the frame. */
26302 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26303 && (info->savres_strategy & REST_INLINE_GPRS))
26305 int offset = info->gp_save_offset;
26306 if (info->push_p)
26307 offset += info->total_size;
26309 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26311 if (IN_RANGE (offset, -0x8000, 0x7fff)
26312 && save_reg_p (regno))
26313 bitmap_set_bit (components, regno);
26315 offset += reg_size;
26319 /* Don't mess with the hard frame pointer. */
26320 if (frame_pointer_needed)
26321 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26323 /* Don't mess with the fixed TOC register. */
26324 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26325 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26326 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26327 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26329 /* The FPRs we need saved to the frame. */
26330 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26331 && (info->savres_strategy & REST_INLINE_FPRS))
26333 int offset = info->fp_save_offset;
26334 if (info->push_p)
26335 offset += info->total_size;
26337 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26339 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26340 bitmap_set_bit (components, regno);
26342 offset += fp_reg_size;
26346 /* Optimize LR save and restore if we can. This is component 0. Any
26347 out-of-line register save/restore routines need LR. */
26348 if (info->lr_save_p
26349 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26350 && (info->savres_strategy & SAVE_INLINE_GPRS)
26351 && (info->savres_strategy & REST_INLINE_GPRS)
26352 && (info->savres_strategy & SAVE_INLINE_FPRS)
26353 && (info->savres_strategy & REST_INLINE_FPRS)
26354 && (info->savres_strategy & SAVE_INLINE_VRS)
26355 && (info->savres_strategy & REST_INLINE_VRS))
26357 int offset = info->lr_save_offset;
26358 if (info->push_p)
26359 offset += info->total_size;
26360 if (IN_RANGE (offset, -0x8000, 0x7fff))
26361 bitmap_set_bit (components, 0);
26364 return components;
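/* A compiled-out sketch (not part of GCC) of the component numbering
   used above: LR is component 0, and a separately wrapped GPR or FPR
   simply uses its hard register number (GPR13..GPR31 -> 13..31,
   FPR14..FPR31 -> 46..63, since FPRn is hard register 32 + n).  The
   helper name is invented; -1 means "not wrapped separately".  */
#if 0
static int
sketch_component_number (int regno, int lr_regno)
{
  if (regno == lr_regno)
    return 0;
  if ((regno >= 13 && regno <= 31) || (regno >= 46 && regno <= 63))
    return regno;
  return -1;
}
#endif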
26367 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26368 static sbitmap
26369 rs6000_components_for_bb (basic_block bb)
26371 rs6000_stack_t *info = rs6000_stack_info ();
26373 bitmap in = DF_LIVE_IN (bb);
26374 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26375 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26377 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26378 bitmap_clear (components);
26380 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26382 /* GPRs. */
26383 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26384 if (bitmap_bit_p (in, regno)
26385 || bitmap_bit_p (gen, regno)
26386 || bitmap_bit_p (kill, regno))
26387 bitmap_set_bit (components, regno);
26389 /* FPRs. */
26390 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26391 if (bitmap_bit_p (in, regno)
26392 || bitmap_bit_p (gen, regno)
26393 || bitmap_bit_p (kill, regno))
26394 bitmap_set_bit (components, regno);
26396 /* The link register. */
26397 if (bitmap_bit_p (in, LR_REGNO)
26398 || bitmap_bit_p (gen, LR_REGNO)
26399 || bitmap_bit_p (kill, LR_REGNO))
26400 bitmap_set_bit (components, 0);
26402 return components;
26405 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26406 static void
26407 rs6000_disqualify_components (sbitmap components, edge e,
26408 sbitmap edge_components, bool /*is_prologue*/)
26410 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26411 live where we want to place that code. */
26412 if (bitmap_bit_p (edge_components, 0)
26413 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26415 if (dump_file)
26416 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26417 "on entry to bb %d\n", e->dest->index);
26418 bitmap_clear_bit (components, 0);
26422 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26423 static void
26424 rs6000_emit_prologue_components (sbitmap components)
26426 rs6000_stack_t *info = rs6000_stack_info ();
26427 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26428 ? HARD_FRAME_POINTER_REGNUM
26429 : STACK_POINTER_REGNUM);
26431 machine_mode reg_mode = Pmode;
26432 int reg_size = TARGET_32BIT ? 4 : 8;
26433 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26434 ? DFmode : SFmode;
26435 int fp_reg_size = 8;
26437 /* Prologue for LR. */
26438 if (bitmap_bit_p (components, 0))
26440 rtx reg = gen_rtx_REG (reg_mode, 0);
26441 rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (reg_mode, LR_REGNO));
26442 RTX_FRAME_RELATED_P (insn) = 1;
26443 add_reg_note (insn, REG_CFA_REGISTER, NULL);
26445 int offset = info->lr_save_offset;
26446 if (info->push_p)
26447 offset += info->total_size;
26449 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26450 RTX_FRAME_RELATED_P (insn) = 1;
26451 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26452 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26453 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26456 /* Prologue for the GPRs. */
26457 int offset = info->gp_save_offset;
26458 if (info->push_p)
26459 offset += info->total_size;
26461 for (int i = info->first_gp_reg_save; i < 32; i++)
26463 if (bitmap_bit_p (components, i))
26465 rtx reg = gen_rtx_REG (reg_mode, i);
26466 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26467 RTX_FRAME_RELATED_P (insn) = 1;
26468 rtx set = copy_rtx (single_set (insn));
26469 add_reg_note (insn, REG_CFA_OFFSET, set);
26472 offset += reg_size;
26475 /* Prologue for the FPRs. */
26476 offset = info->fp_save_offset;
26477 if (info->push_p)
26478 offset += info->total_size;
26480 for (int i = info->first_fp_reg_save; i < 64; i++)
26482 if (bitmap_bit_p (components, i))
26484 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26485 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26486 RTX_FRAME_RELATED_P (insn) = 1;
26487 rtx set = copy_rtx (single_set (insn));
26488 add_reg_note (insn, REG_CFA_OFFSET, set);
26491 offset += fp_reg_size;
26495 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26496 static void
26497 rs6000_emit_epilogue_components (sbitmap components)
26499 rs6000_stack_t *info = rs6000_stack_info ();
26500 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26501 ? HARD_FRAME_POINTER_REGNUM
26502 : STACK_POINTER_REGNUM);
26504 machine_mode reg_mode = Pmode;
26505 int reg_size = TARGET_32BIT ? 4 : 8;
26507 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26508 ? DFmode : SFmode;
26509 int fp_reg_size = 8;
26511 /* Epilogue for the FPRs. */
26512 int offset = info->fp_save_offset;
26513 if (info->push_p)
26514 offset += info->total_size;
26516 for (int i = info->first_fp_reg_save; i < 64; i++)
26518 if (bitmap_bit_p (components, i))
26520 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26521 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26522 RTX_FRAME_RELATED_P (insn) = 1;
26523 add_reg_note (insn, REG_CFA_RESTORE, reg);
26526 offset += fp_reg_size;
26529 /* Epilogue for the GPRs. */
26530 offset = info->gp_save_offset;
26531 if (info->push_p)
26532 offset += info->total_size;
26534 for (int i = info->first_gp_reg_save; i < 32; i++)
26536 if (bitmap_bit_p (components, i))
26538 rtx reg = gen_rtx_REG (reg_mode, i);
26539 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26540 RTX_FRAME_RELATED_P (insn) = 1;
26541 add_reg_note (insn, REG_CFA_RESTORE, reg);
26544 offset += reg_size;
26547 /* Epilogue for LR. */
26548 if (bitmap_bit_p (components, 0))
26550 int offset = info->lr_save_offset;
26551 if (info->push_p)
26552 offset += info->total_size;
26554 rtx reg = gen_rtx_REG (reg_mode, 0);
26555 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26557 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26558 insn = emit_move_insn (lr, reg);
26559 RTX_FRAME_RELATED_P (insn) = 1;
26560 add_reg_note (insn, REG_CFA_RESTORE, lr);
26564 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26565 static void
26566 rs6000_set_handled_components (sbitmap components)
26568 rs6000_stack_t *info = rs6000_stack_info ();
26570 for (int i = info->first_gp_reg_save; i < 32; i++)
26571 if (bitmap_bit_p (components, i))
26572 cfun->machine->gpr_is_wrapped_separately[i] = true;
26574 for (int i = info->first_fp_reg_save; i < 64; i++)
26575 if (bitmap_bit_p (components, i))
26576 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26578 if (bitmap_bit_p (components, 0))
26579 cfun->machine->lr_is_wrapped_separately = true;
26582 /* VRSAVE is a bit vector representing which AltiVec registers
26583 are used. The OS uses this to determine which vector
26584 registers to save on a context switch. We need to save
26585 VRSAVE on the stack frame, add whatever AltiVec registers we
26586 used in this function, and do the corresponding magic in the
26587 epilogue. */
26588 static void
26589 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26590 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26592 /* Get VRSAVE into a GPR. */
26593 rtx reg = gen_rtx_REG (SImode, save_regno);
26594 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26595 if (TARGET_MACHO)
26596 emit_insn (gen_get_vrsave_internal (reg));
26597 else
26598 emit_insn (gen_rtx_SET (reg, vrsave));
26600 /* Save VRSAVE. */
26601 int offset = info->vrsave_save_offset + frame_off;
26602 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26604 /* Include the registers in the mask. */
26605 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26607 emit_insn (generate_set_vrsave (reg, info, 0));
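/* A compiled-out sketch (not part of GCC) of the VRSAVE mask the
   prologue above ORs in: one bit per vector register, assuming the
   conventional layout where v0 maps to the most significant bit (as
   ALTIVEC_REG_BIT lays it out).  The helper name is invented.  */
#if 0
#include <stdint.h>

static uint32_t
sketch_vrsave_mask (const int *vr_used, int n)
{
  uint32_t mask = 0;
  for (int i = 0; i < n; i++)
    mask |= 0x80000000u >> vr_used[i];	/* vr_used[i] in 0..31.  */
  return mask;
}
#endif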
26610 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26611 called, it left the arg pointer to the old stack in r29. Otherwise, the
26612 arg pointer is the top of the current frame. */
26613 static void
26614 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26615 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26617 cfun->machine->split_stack_argp_used = true;
26619 if (sp_adjust)
26621 rtx r12 = gen_rtx_REG (Pmode, 12);
26622 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26623 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26624 emit_insn_before (set_r12, sp_adjust);
26626 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26628 rtx r12 = gen_rtx_REG (Pmode, 12);
26629 if (frame_off == 0)
26630 emit_move_insn (r12, frame_reg_rtx);
26631 else
26632 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26635 if (info->push_p)
26637 rtx r12 = gen_rtx_REG (Pmode, 12);
26638 rtx r29 = gen_rtx_REG (Pmode, 29);
26639 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26640 rtx not_more = gen_label_rtx ();
26641 rtx jump;
26643 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26644 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26645 gen_rtx_LABEL_REF (VOIDmode, not_more),
26646 pc_rtx);
26647 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26648 JUMP_LABEL (jump) = not_more;
26649 LABEL_NUSES (not_more) += 1;
26650 emit_move_insn (r12, r29);
26651 emit_label (not_more);
26655 /* Emit function prologue as insns. */
26657 void
26658 rs6000_emit_prologue (void)
26660 rs6000_stack_t *info = rs6000_stack_info ();
26661 machine_mode reg_mode = Pmode;
26662 int reg_size = TARGET_32BIT ? 4 : 8;
26663 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
26664 ? DFmode : SFmode;
26665 int fp_reg_size = 8;
26666 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26667 rtx frame_reg_rtx = sp_reg_rtx;
26668 unsigned int cr_save_regno;
26669 rtx cr_save_rtx = NULL_RTX;
26670 rtx_insn *insn;
26671 int strategy;
26672 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26673 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26674 && call_used_regs[STATIC_CHAIN_REGNUM]);
26675 int using_split_stack = (flag_split_stack
26676 && (lookup_attribute ("no_split_stack",
26677 DECL_ATTRIBUTES (cfun->decl))
26678 == NULL));
26680 /* Offset to top of frame for frame_reg and sp respectively. */
26681 HOST_WIDE_INT frame_off = 0;
26682 HOST_WIDE_INT sp_off = 0;
26683 /* sp_adjust is the stack adjusting instruction, tracked so that the
26684 insn setting up the split-stack arg pointer can be emitted just
26685 prior to it, when r12 is not used here for other purposes. */
26686 rtx_insn *sp_adjust = 0;
26688 #if CHECKING_P
26689 /* Track and check usage of r0, r11, r12. */
26690 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26691 #define START_USE(R) do \
26693 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26694 reg_inuse |= 1 << (R); \
26695 } while (0)
26696 #define END_USE(R) do \
26698 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26699 reg_inuse &= ~(1 << (R)); \
26700 } while (0)
26701 #define NOT_INUSE(R) do \
26703 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26704 } while (0)
26705 #else
26706 #define START_USE(R) do {} while (0)
26707 #define END_USE(R) do {} while (0)
26708 #define NOT_INUSE(R) do {} while (0)
26709 #endif
26711 if (DEFAULT_ABI == ABI_ELFv2
26712 && !TARGET_SINGLE_PIC_BASE)
26714 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26716 /* With -mminimal-toc we may generate an extra use of r2 below. */
26717 if (TARGET_TOC && TARGET_MINIMAL_TOC
26718 && !constant_pool_empty_p ())
26719 cfun->machine->r2_setup_needed = true;
26723 if (flag_stack_usage_info)
26724 current_function_static_stack_size = info->total_size;
26726 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26728 HOST_WIDE_INT size = info->total_size;
26730 if (crtl->is_leaf && !cfun->calls_alloca)
26732 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
26733 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
26734 size - STACK_CHECK_PROTECT);
26736 else if (size > 0)
26737 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
26740 if (TARGET_FIX_AND_CONTINUE)
26742 /* gdb on darwin arranges to forward a function from the old
26743 address by modifying the first 5 instructions of the function
26744 to branch to the overriding function. This is necessary to
26745 permit function pointers that point to the old function to
26746 actually forward to the new function. */
26747 emit_insn (gen_nop ());
26748 emit_insn (gen_nop ());
26749 emit_insn (gen_nop ());
26750 emit_insn (gen_nop ());
26751 emit_insn (gen_nop ());
26754 /* Handle world saves specially here. */
26755 if (WORLD_SAVE_P (info))
26757 int i, j, sz;
26758 rtx treg;
26759 rtvec p;
26760 rtx reg0;
26762 /* save_world expects lr in r0. */
26763 reg0 = gen_rtx_REG (Pmode, 0);
26764 if (info->lr_save_p)
26766 insn = emit_move_insn (reg0,
26767 gen_rtx_REG (Pmode, LR_REGNO));
26768 RTX_FRAME_RELATED_P (insn) = 1;
26771 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26772 assumptions about the offsets of various bits of the stack
26773 frame. */
26774 gcc_assert (info->gp_save_offset == -220
26775 && info->fp_save_offset == -144
26776 && info->lr_save_offset == 8
26777 && info->cr_save_offset == 4
26778 && info->push_p
26779 && info->lr_save_p
26780 && (!crtl->calls_eh_return
26781 || info->ehrd_offset == -432)
26782 && info->vrsave_save_offset == -224
26783 && info->altivec_save_offset == -416);
26785 treg = gen_rtx_REG (SImode, 11);
26786 emit_move_insn (treg, GEN_INT (-info->total_size));
26788 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26789 in R11. It also clobbers R12, so beware! */
26791 /* Preserve CR2 for save_world prologues */
26792 sz = 5;
26793 sz += 32 - info->first_gp_reg_save;
26794 sz += 64 - info->first_fp_reg_save;
26795 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26796 p = rtvec_alloc (sz);
26797 j = 0;
26798 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26799 gen_rtx_REG (SImode,
26800 LR_REGNO));
26801 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26802 gen_rtx_SYMBOL_REF (Pmode,
26803 "*save_world"));
26804 /* We do floats first so that the instruction pattern matches
26805 properly. */
26806 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26807 RTVEC_ELT (p, j++)
26808 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
26809 ? DFmode : SFmode,
26810 info->first_fp_reg_save + i),
26811 frame_reg_rtx,
26812 info->fp_save_offset + frame_off + 8 * i);
26813 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26814 RTVEC_ELT (p, j++)
26815 = gen_frame_store (gen_rtx_REG (V4SImode,
26816 info->first_altivec_reg_save + i),
26817 frame_reg_rtx,
26818 info->altivec_save_offset + frame_off + 16 * i);
26819 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26820 RTVEC_ELT (p, j++)
26821 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26822 frame_reg_rtx,
26823 info->gp_save_offset + frame_off + reg_size * i);
26825 /* CR register traditionally saved as CR2. */
26826 RTVEC_ELT (p, j++)
26827 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26828 frame_reg_rtx, info->cr_save_offset + frame_off);
26829 /* Describe the save of R0, which holds the caller's LR. */
26830 if (info->lr_save_p)
26831 RTVEC_ELT (p, j++)
26832 = gen_frame_store (reg0,
26833 frame_reg_rtx, info->lr_save_offset + frame_off);
26834 /* Explain what happens to the stack pointer. */
26836 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26837 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26840 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26841 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26842 treg, GEN_INT (-info->total_size));
26843 sp_off = frame_off = info->total_size;
26846 strategy = info->savres_strategy;
26848 /* For V.4, update stack before we do any saving and set back pointer. */
26849 if (! WORLD_SAVE_P (info)
26850 && info->push_p
26851 && (DEFAULT_ABI == ABI_V4
26852 || crtl->calls_eh_return))
26854 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26855 || !(strategy & SAVE_INLINE_GPRS)
26856 || !(strategy & SAVE_INLINE_VRS));
26857 int ptr_regno = -1;
26858 rtx ptr_reg = NULL_RTX;
26859 int ptr_off = 0;
26861 if (info->total_size < 32767)
26862 frame_off = info->total_size;
26863 else if (need_r11)
26864 ptr_regno = 11;
26865 else if (info->cr_save_p
26866 || info->lr_save_p
26867 || info->first_fp_reg_save < 64
26868 || info->first_gp_reg_save < 32
26869 || info->altivec_size != 0
26870 || info->vrsave_size != 0
26871 || crtl->calls_eh_return)
26872 ptr_regno = 12;
26873 else
26875 /* The prologue won't be saving any regs so there is no need
26876 to set up a frame register to access any frame save area.
26877 We also won't be using frame_off anywhere below, but set
26878 the correct value anyway to protect against future
26879 changes to this function. */
26880 frame_off = info->total_size;
26882 if (ptr_regno != -1)
26884 /* Set up the frame offset to that needed by the first
26885 out-of-line save function. */
26886 START_USE (ptr_regno);
26887 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26888 frame_reg_rtx = ptr_reg;
26889 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26890 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26891 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26892 ptr_off = info->gp_save_offset + info->gp_size;
26893 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26894 ptr_off = info->altivec_save_offset + info->altivec_size;
26895 frame_off = -ptr_off;
26897 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26898 ptr_reg, ptr_off);
26899 if (REGNO (frame_reg_rtx) == 12)
26900 sp_adjust = 0;
26901 sp_off = info->total_size;
26902 if (frame_reg_rtx != sp_reg_rtx)
26903 rs6000_emit_stack_tie (frame_reg_rtx, false);
26906 /* If we use the link register, get it into r0. */
26907 if (!WORLD_SAVE_P (info) && info->lr_save_p
26908 && !cfun->machine->lr_is_wrapped_separately)
26910 rtx addr, reg, mem;
26912 reg = gen_rtx_REG (Pmode, 0);
26913 START_USE (0);
26914 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26915 RTX_FRAME_RELATED_P (insn) = 1;
26917 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26918 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26920 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26921 GEN_INT (info->lr_save_offset + frame_off));
26922 mem = gen_rtx_MEM (Pmode, addr);
26923 /* This should not be of rs6000_sr_alias_set, because of
26924 __builtin_return_address. */
26926 insn = emit_move_insn (mem, reg);
26927 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26928 NULL_RTX, NULL_RTX);
26929 END_USE (0);
26933 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26934 r12 will be needed by the out-of-line gpr save routine. */
26935 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26936 && !(strategy & (SAVE_INLINE_GPRS
26937 | SAVE_NOINLINE_GPRS_SAVES_LR))
26938 ? 11 : 12);
26939 if (!WORLD_SAVE_P (info)
26940 && info->cr_save_p
26941 && REGNO (frame_reg_rtx) != cr_save_regno
26942 && !(using_static_chain_p && cr_save_regno == 11)
26943 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26945 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26946 START_USE (cr_save_regno);
26947 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26950 /* Do any required saving of FPRs. If only one or two to save, do
26951 it ourselves. Otherwise, call an out-of-line save routine. */
26952 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26954 int offset = info->fp_save_offset + frame_off;
26955 for (int i = info->first_fp_reg_save; i < 64; i++)
26957 if (save_reg_p (i)
26958 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
26959 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
26960 sp_off - frame_off);
26962 offset += fp_reg_size;
26965 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26967 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26968 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26969 unsigned ptr_regno = ptr_regno_for_savres (sel);
26970 rtx ptr_reg = frame_reg_rtx;
26972 if (REGNO (frame_reg_rtx) == ptr_regno)
26973 gcc_checking_assert (frame_off == 0);
26974 else
26976 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26977 NOT_INUSE (ptr_regno);
26978 emit_insn (gen_add3_insn (ptr_reg,
26979 frame_reg_rtx, GEN_INT (frame_off)));
26981 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26982 info->fp_save_offset,
26983 info->lr_save_offset,
26984 DFmode, sel);
26985 rs6000_frame_related (insn, ptr_reg, sp_off,
26986 NULL_RTX, NULL_RTX);
26987 if (lr)
26988 END_USE (0);
26991 /* Save GPRs. This is done as a PARALLEL if we are using
26992 the store-multiple instructions. */
26993 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26995 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26996 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26997 unsigned ptr_regno = ptr_regno_for_savres (sel);
26998 rtx ptr_reg = frame_reg_rtx;
26999 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27000 int end_save = info->gp_save_offset + info->gp_size;
27001 int ptr_off;
27003 if (ptr_regno == 12)
27004 sp_adjust = 0;
27005 if (!ptr_set_up)
27006 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27008 /* Need to adjust r11 (r12) if we saved any FPRs. */
27009 if (end_save + frame_off != 0)
27011 rtx offset = GEN_INT (end_save + frame_off);
27013 if (ptr_set_up)
27014 frame_off = -end_save;
27015 else
27016 NOT_INUSE (ptr_regno);
27017 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27019 else if (!ptr_set_up)
27021 NOT_INUSE (ptr_regno);
27022 emit_move_insn (ptr_reg, frame_reg_rtx);
27024 ptr_off = -end_save;
27025 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27026 info->gp_save_offset + ptr_off,
27027 info->lr_save_offset + ptr_off,
27028 reg_mode, sel);
27029 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27030 NULL_RTX, NULL_RTX);
27031 if (lr)
27032 END_USE (0);
27034 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27036 rtvec p;
27037 int i;
27038 p = rtvec_alloc (32 - info->first_gp_reg_save);
27039 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27040 RTVEC_ELT (p, i)
27041 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27042 frame_reg_rtx,
27043 info->gp_save_offset + frame_off + reg_size * i);
27044 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27045 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27046 NULL_RTX, NULL_RTX);
27048 else if (!WORLD_SAVE_P (info))
27050 int offset = info->gp_save_offset + frame_off;
27051 for (int i = info->first_gp_reg_save; i < 32; i++)
27053 if (save_reg_p (i)
27054 && !cfun->machine->gpr_is_wrapped_separately[i])
27055 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27056 sp_off - frame_off);
27058 offset += reg_size;
27062 if (crtl->calls_eh_return)
27064 unsigned int i;
27065 rtvec p;
27067 for (i = 0; ; ++i)
27069 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27070 if (regno == INVALID_REGNUM)
27071 break;
27074 p = rtvec_alloc (i);
27076 for (i = 0; ; ++i)
27078 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27079 if (regno == INVALID_REGNUM)
27080 break;
27082 rtx set
27083 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27084 sp_reg_rtx,
27085 info->ehrd_offset + sp_off + reg_size * (int) i);
27086 RTVEC_ELT (p, i) = set;
27087 RTX_FRAME_RELATED_P (set) = 1;
27090 insn = emit_insn (gen_blockage ());
27091 RTX_FRAME_RELATED_P (insn) = 1;
27092 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27095 /* In the AIX ABI we need to make sure r2 is really saved. */
27096 if (TARGET_AIX && crtl->calls_eh_return)
27098 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27099 rtx join_insn, note;
27100 rtx_insn *save_insn;
27101 long toc_restore_insn;
27103 tmp_reg = gen_rtx_REG (Pmode, 11);
27104 tmp_reg_si = gen_rtx_REG (SImode, 11);
27105 if (using_static_chain_p)
27107 START_USE (0);
27108 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27110 else
27111 START_USE (11);
27112 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27113 /* Peek at instruction to which this function returns. If it's
27114 restoring r2, then we know we've already saved r2. We can't
27115 unconditionally save r2 because the value we have will already
27116 be updated if we arrived at this function via a plt call or
27117 toc adjusting stub. */
27118 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27119 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27120 + RS6000_TOC_SAVE_SLOT);
27121 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27122 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27123 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27124 validate_condition_mode (EQ, CCUNSmode);
27125 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27126 emit_insn (gen_rtx_SET (compare_result,
27127 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27128 toc_save_done = gen_label_rtx ();
27129 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27130 gen_rtx_EQ (VOIDmode, compare_result,
27131 const0_rtx),
27132 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27133 pc_rtx);
27134 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27135 JUMP_LABEL (jump) = toc_save_done;
27136 LABEL_NUSES (toc_save_done) += 1;
27138 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27139 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27140 sp_off - frame_off);
27142 emit_label (toc_save_done);
27144 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
27145 have a CFG that has different saves along different paths.
27146 Move the note to a dummy blockage insn, which describes that
27147 R2 is unconditionally saved after the label. */
27148 /* ??? An alternate representation might be a special insn pattern
27149 containing both the branch and the store. That might give the
27150 code that minimizes the number of DW_CFA_advance opcodes more
27151 freedom in placing the annotations. */
27152 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27153 if (note)
27154 remove_note (save_insn, note);
27155 else
27156 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27157 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27158 RTX_FRAME_RELATED_P (save_insn) = 0;
27160 join_insn = emit_insn (gen_blockage ());
27161 REG_NOTES (join_insn) = note;
27162 RTX_FRAME_RELATED_P (join_insn) = 1;
27164 if (using_static_chain_p)
27166 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27167 END_USE (0);
27169 else
27170 END_USE (11);
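/* An illustrative 64-bit sketch of the sequence emitted above
   (modulo the r0/r11 shuffle when the static chain is live):

     mflr 11
     lwz 11,0(11)          # fetch the insn at our return address
     xoris 11,11,0xe841    # is it "ld 2,40(1)", i.e. a TOC restore?
     cmplwi 0,11,40
     beq 0,.Ltoc_done      # yes: the caller's stub restores r2
     std 2,<toc slot>      # no: save r2 ourselves
   .Ltoc_done:  */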
27173 /* Save CR if we use any that must be preserved. */
27174 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27176 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27177 GEN_INT (info->cr_save_offset + frame_off));
27178 rtx mem = gen_frame_mem (SImode, addr);
27180 /* If we didn't copy cr before, do so now using r0. */
27181 if (cr_save_rtx == NULL_RTX)
27183 START_USE (0);
27184 cr_save_rtx = gen_rtx_REG (SImode, 0);
27185 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27188 /* Saving CR requires a two-instruction sequence: one instruction
27189 to move the CR to a general-purpose register, and a second
27190 instruction that stores the GPR to memory.
27192 We do not emit any DWARF CFI records for the first of these,
27193 because we cannot properly represent the fact that CR is saved in
27194 a register. One reason is that we cannot express that multiple
27195 CR fields are saved; another reason is that on 64-bit, the size
27196 of the CR register in DWARF (4 bytes) differs from the size of
27197 a general-purpose register.
27199 This means if any intervening instruction were to clobber one of
27200 the call-saved CR fields, we'd have incorrect CFI. To prevent
27201 this from happening, we mark the store to memory as a use of
27202 those CR fields, which prevents any such instruction from being
27203 scheduled in between the two instructions. */
27204 rtx crsave_v[9];
27205 int n_crsave = 0;
27206 int i;
27208 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27209 for (i = 0; i < 8; i++)
27210 if (save_reg_p (CR0_REGNO + i))
27211 crsave_v[n_crsave++]
27212 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27214 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27215 gen_rtvec_v (n_crsave, crsave_v)));
27216 END_USE (REGNO (cr_save_rtx));
27218 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27219 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27220 so we need to construct a frame expression manually. */
27221 RTX_FRAME_RELATED_P (insn) = 1;
27223 /* Update address to be stack-pointer relative, like
27224 rs6000_frame_related would do. */
27225 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27226 GEN_INT (info->cr_save_offset + sp_off));
27227 mem = gen_frame_mem (SImode, addr);
27229 if (DEFAULT_ABI == ABI_ELFv2)
27231 /* In the ELFv2 ABI we generate separate CFI records for each
27232 CR field that was actually saved. They all point to the
27233 same 32-bit stack slot. */
27234 rtx crframe[8];
27235 int n_crframe = 0;
27237 for (i = 0; i < 8; i++)
27238 if (save_reg_p (CR0_REGNO + i))
27240 crframe[n_crframe]
27241 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27243 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27244 n_crframe++;
27247 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27248 gen_rtx_PARALLEL (VOIDmode,
27249 gen_rtvec_v (n_crframe, crframe)));
27251 else
27253 /* In other ABIs, by convention, we use a single CR regnum to
27254 represent the fact that all call-saved CR fields are saved.
27255 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27256 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27257 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27261 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27262 *separate* slots if the routine calls __builtin_eh_return, so
27263 that they can be independently restored by the unwinder. */
27264 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27266 int i, cr_off = info->ehcr_offset;
27267 rtx crsave;
27269 /* ??? We might get better performance by using multiple mfocrf
27270 instructions. */
27271 crsave = gen_rtx_REG (SImode, 0);
27272 emit_insn (gen_prologue_movesi_from_cr (crsave));
27274 for (i = 0; i < 8; i++)
27275 if (!call_used_regs[CR0_REGNO + i])
27277 rtvec p = rtvec_alloc (2);
27278 RTVEC_ELT (p, 0)
27279 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27280 RTVEC_ELT (p, 1)
27281 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27283 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27285 RTX_FRAME_RELATED_P (insn) = 1;
27286 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27287 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27288 sp_reg_rtx, cr_off + sp_off));
27290 cr_off += reg_size;
27294 /* Update stack and set back pointer unless this is V.4,
27295 for which it was done previously. */
27296 if (!WORLD_SAVE_P (info) && info->push_p
27297 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27299 rtx ptr_reg = NULL;
27300 int ptr_off = 0;
27302 /* If saving AltiVec regs, we need to be able to address all save
27303 locations using a 16-bit offset. */
27304 if ((strategy & SAVE_INLINE_VRS) == 0
27305 || (info->altivec_size != 0
27306 && (info->altivec_save_offset + info->altivec_size - 16
27307 + info->total_size - frame_off) > 32767)
27308 || (info->vrsave_size != 0
27309 && (info->vrsave_save_offset
27310 + info->total_size - frame_off) > 32767))
27312 int sel = SAVRES_SAVE | SAVRES_VR;
27313 unsigned ptr_regno = ptr_regno_for_savres (sel);
27315 if (using_static_chain_p
27316 && ptr_regno == STATIC_CHAIN_REGNUM)
27317 ptr_regno = 12;
27318 if (REGNO (frame_reg_rtx) != ptr_regno)
27319 START_USE (ptr_regno);
27320 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27321 frame_reg_rtx = ptr_reg;
27322 ptr_off = info->altivec_save_offset + info->altivec_size;
27323 frame_off = -ptr_off;
27325 else if (REGNO (frame_reg_rtx) == 1)
27326 frame_off = info->total_size;
27327 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27328 ptr_reg, ptr_off);
27329 if (REGNO (frame_reg_rtx) == 12)
27330 sp_adjust = 0;
27331 sp_off = info->total_size;
27332 if (frame_reg_rtx != sp_reg_rtx)
27333 rs6000_emit_stack_tie (frame_reg_rtx, false);
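/* rs6000_emit_stack_tie emits a blockage-style insn expressing a
   dependency between stack memory and the frame register, so the
   scheduler cannot move frame accesses across the stack pointer
   update.  */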
27336 /* Set frame pointer, if needed. */
27337 if (frame_pointer_needed)
27339 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27340 sp_reg_rtx);
27341 RTX_FRAME_RELATED_P (insn) = 1;
27344 /* Save AltiVec registers if needed. Save here because the red zone does
27345 not always include AltiVec registers. */
27346 if (!WORLD_SAVE_P (info)
27347 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27349 int end_save = info->altivec_save_offset + info->altivec_size;
27350 int ptr_off;
27351 /* Oddly, the vector save/restore functions point r0 at the end
27352 of the save area, then use r11 or r12 to load offsets for
27353 [reg+reg] addressing. */
27354 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27355 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27356 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27358 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27359 NOT_INUSE (0);
27360 if (scratch_regno == 12)
27361 sp_adjust = 0;
27362 if (end_save + frame_off != 0)
27364 rtx offset = GEN_INT (end_save + frame_off);
27366 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27368 else
27369 emit_move_insn (ptr_reg, frame_reg_rtx);
27371 ptr_off = -end_save;
27372 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27373 info->altivec_save_offset + ptr_off,
27374 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27375 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27376 NULL_RTX, NULL_RTX);
27377 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27379 /* The oddity mentioned above clobbered our frame reg. */
27380 emit_move_insn (frame_reg_rtx, ptr_reg);
27381 frame_off = ptr_off;
27384 else if (!WORLD_SAVE_P (info)
27385 && info->altivec_size != 0)
27387 int i;
27389 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27390 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27392 rtx areg, savereg, mem;
27393 HOST_WIDE_INT offset;
27395 offset = (info->altivec_save_offset + frame_off
27396 + 16 * (i - info->first_altivec_reg_save));
27398 savereg = gen_rtx_REG (V4SImode, i);
27400 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27402 mem = gen_frame_mem (V4SImode,
27403 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27404 GEN_INT (offset)));
27405 insn = emit_insn (gen_rtx_SET (mem, savereg));
27406 areg = NULL_RTX;
27408 else
27410 NOT_INUSE (0);
27411 areg = gen_rtx_REG (Pmode, 0);
27412 emit_move_insn (areg, GEN_INT (offset));
27414 /* AltiVec addressing mode is [reg+reg]. */
27415 mem = gen_frame_mem (V4SImode,
27416 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27418 /* Rather than emitting a generic move, force use of the stvx
27419 instruction, which we always want on ISA 2.07 (power8) systems.
27420 In particular we don't want xxpermdi/stxvd2x for little
27421 endian. */
27422 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27425 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27426 areg, GEN_INT (offset));
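/* For the [reg+reg] path above, the emitted sequence is roughly:

     li 0,OFFSET           # build the save-slot offset in r0
     stvx vN,FRAME,0       # indexed store of the vector register

   with the li replaced by an lis/ori pair when OFFSET does not fit
   in 16 bits.  */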
27430 /* VRSAVE is a bit vector representing which AltiVec registers
27431 are used. The OS uses this to determine which vector
27432 registers to save on a context switch. We need to save
27433 VRSAVE on the stack frame, add whatever AltiVec registers we
27434 used in this function, and do the corresponding magic in the
27435 epilogue. */
27437 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27439 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27440 be using r12 as frame_reg_rtx and r11 as the static chain
27441 pointer for nested functions. */
27442 int save_regno = 12;
27443 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27444 && !using_static_chain_p)
27445 save_regno = 11;
27446 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27448 save_regno = 11;
27449 if (using_static_chain_p)
27450 save_regno = 0;
27452 NOT_INUSE (save_regno);
27454 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
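/* emit_vrsave_prologue is expected to copy the incoming VRSAVE value
   into SAVE_REGNO, store it to its slot at vrsave_save_offset, and
   set the mask bits for the AltiVec registers this function uses;
   the epilogue's generate_set_vrsave undoes the change.  */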
27457 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27458 if (!TARGET_SINGLE_PIC_BASE
27459 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27460 && !constant_pool_empty_p ())
27461 || (DEFAULT_ABI == ABI_V4
27462 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27463 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27465 /* If emit_load_toc_table will use the link register, we need to save
27466 it. We use R12 for this purpose because emit_load_toc_table
27467 can use register 0. This allows us to use a plain 'blr' to return
27468 from the procedure more often. */
27469 int save_LR_around_toc_setup = (TARGET_ELF
27470 && DEFAULT_ABI == ABI_V4
27471 && flag_pic
27472 && ! info->lr_save_p
27473 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27474 if (save_LR_around_toc_setup)
27476 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27477 rtx tmp = gen_rtx_REG (Pmode, 12);
27479 sp_adjust = 0;
27480 insn = emit_move_insn (tmp, lr);
27481 RTX_FRAME_RELATED_P (insn) = 1;
27483 rs6000_emit_load_toc_table (TRUE);
27485 insn = emit_move_insn (lr, tmp);
27486 add_reg_note (insn, REG_CFA_RESTORE, lr);
27487 RTX_FRAME_RELATED_P (insn) = 1;
27489 else
27490 rs6000_emit_load_toc_table (TRUE);
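/* The V.4 -fpic TOC/GOT setup obtains its base with a branch-and-link
   (roughly "bl _GLOBAL_OFFSET_TABLE_@local-4; mflr 30"), which is why
   LR may need to be preserved in r12 around the call above.  */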
27493 #if TARGET_MACHO
27494 if (!TARGET_SINGLE_PIC_BASE
27495 && DEFAULT_ABI == ABI_DARWIN
27496 && flag_pic && crtl->uses_pic_offset_table)
27498 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27499 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27501 /* Save and restore LR locally around this call (in R0). */
27502 if (!info->lr_save_p)
27503 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27505 emit_insn (gen_load_macho_picbase (src));
27507 emit_move_insn (gen_rtx_REG (Pmode,
27508 RS6000_PIC_OFFSET_TABLE_REGNUM),
27509 lr);
27511 if (!info->lr_save_p)
27512 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27514 #endif
27516 /* If we need to, save the TOC register after doing the stack setup.
27517 Do not emit eh frame info for this save. The unwinder wants info,
27518 conceptually attached to instructions in this function, about
27519 register values in the caller of this function. This R2 may have
27520 already been changed from the value in the caller.
27521 We don't attempt to write accurate DWARF EH frame info for R2
27522 because code emitted by gcc for a (non-pointer) function call
27523 doesn't save and restore R2. Instead, R2 is managed out-of-line
27524 by a linker generated plt call stub when the function resides in
27525 a shared library. This behavior is costly to describe in DWARF,
27526 both in terms of the size of DWARF info and the time taken in the
27527 unwinder to interpret it. R2 changes, apart from the
27528 calls_eh_return case earlier in this function, are handled by
27529 linux-unwind.h frob_update_context. */
27530 if (rs6000_save_toc_in_prologue_p ())
27532 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27533 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27536 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27537 if (using_split_stack && split_stack_arg_pointer_used_p ())
27538 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27541 /* Output .extern statements for the save/restore routines we use. */
27543 static void
27544 rs6000_output_savres_externs (FILE *file)
27546 rs6000_stack_t *info = rs6000_stack_info ();
27548 if (TARGET_DEBUG_STACK)
27549 debug_stack_info (info);
27551 /* Write .extern for any function we will call to save and restore
27552 fp values. */
27553 if (info->first_fp_reg_save < 64
27554 && !TARGET_MACHO
27555 && !TARGET_ELF)
27557 char *name;
27558 int regno = info->first_fp_reg_save - 32;
27560 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27562 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27563 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27564 name = rs6000_savres_routine_name (regno, sel);
27565 fprintf (file, "\t.extern %s\n", name);
27567 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27569 bool lr = (info->savres_strategy
27570 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27571 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27572 name = rs6000_savres_routine_name (regno, sel);
27573 fprintf (file, "\t.extern %s\n", name);
27578 /* Write function prologue. */
27580 static void
27581 rs6000_output_function_prologue (FILE *file)
27583 if (!cfun->is_thunk)
27584 rs6000_output_savres_externs (file);
27586 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27587 immediately after the global entry point label. */
27588 if (rs6000_global_entry_point_needed_p ())
27590 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27592 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27594 if (TARGET_CMODEL != CMODEL_LARGE)
27596 /* In the small and medium code models, we assume the TOC is less
27597 than 2 GB away from the text section, so it can be computed via the
27598 following two-instruction sequence. */
27599 char buf[256];
27601 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27602 fprintf (file, "0:\taddis 2,12,.TOC.-");
27603 assemble_name (file, buf);
27604 fprintf (file, "@ha\n");
27605 fprintf (file, "\taddi 2,2,.TOC.-");
27606 assemble_name (file, buf);
27607 fprintf (file, "@l\n");
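/* For the first function in a file this prints, e.g.:

   0:	addis 2,12,.TOC.-.LCF0@ha
   	addi 2,2,.TOC.-.LCF0@l

   computing the TOC pointer from the global entry address in r12.  */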
27609 else
27611 /* In the large code model, we allow arbitrary offsets between the
27612 TOC and the text section, so we have to load the offset from
27613 memory. The data field is emitted directly before the global
27614 entry point in rs6000_elf_declare_function_name. */
27615 char buf[256];
27617 #ifdef HAVE_AS_ENTRY_MARKERS
27618 /* If supported by the linker, emit a marker relocation. If the
27619 total code size of the final executable or shared library
27620 happens to fit into 2 GB after all, the linker will replace
27621 this code sequence with the sequence for the small or medium
27622 code model. */
27623 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27624 #endif
27625 fprintf (file, "\tld 2,");
27626 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27627 assemble_name (file, buf);
27628 fprintf (file, "-");
27629 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27630 assemble_name (file, buf);
27631 fprintf (file, "(12)\n");
27632 fprintf (file, "\tadd 2,2,12\n");
27635 fputs ("\t.localentry\t", file);
27636 assemble_name (file, name);
27637 fputs (",.-", file);
27638 assemble_name (file, name);
27639 fputs ("\n", file);
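/* Per the ELFv2 ABI, .localentry encodes the distance from the global
   entry point to the local entry point in the symbol's st_other
   field, so callers with a valid TOC pointer can skip the r2 setup
   above.  */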
27642 /* Output -mprofile-kernel code. This needs to be done here instead of
27643 in output_function_profiler since it must go after the ELFv2 ABI
27644 local entry point. */
27645 if (TARGET_PROFILE_KERNEL && crtl->profile)
27647 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27648 gcc_assert (!TARGET_32BIT);
27650 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27652 /* In the ELFv2 ABI we have no compiler stack word. It must be
27653 the responsibility of _mcount to preserve the static chain
27654 register if required. */
27655 if (DEFAULT_ABI != ABI_ELFv2
27656 && cfun->static_chain_decl != NULL)
27658 asm_fprintf (file, "\tstd %s,24(%s)\n",
27659 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27660 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27661 asm_fprintf (file, "\tld %s,24(%s)\n",
27662 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27664 else
27665 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
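/* So the -mprofile-kernel preamble is, e.g. (assuming RS6000_MCOUNT
   is "_mcount" and the static chain lives in r11):

     mflr 0
     std 11,24(1)          # only when a static chain is live (not ELFv2)
     bl _mcount
     ld 11,24(1)  */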
27668 rs6000_pic_labelno++;
27671 /* -mprofile-kernel code calls mcount before the function prologue,
27672 so a profiled leaf function should stay a leaf function. */
27673 static bool
27674 rs6000_keep_leaf_when_profiled ()
27676 return TARGET_PROFILE_KERNEL;
27679 /* Non-zero if vmx regs are restored before the frame pop, zero if
27680 we restore after the pop when possible. */
27681 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27683 /* Restoring cr is a two-step process: loading a reg from the frame
27684 save, then moving the reg to cr. For ABI_V4 we must let the
27685 unwinder know that the stack location is no longer valid at or
27686 before the stack deallocation, but we can't emit a cfa_restore for
27687 cr at the stack deallocation like we do for other registers.
27688 The trouble is that it is possible for the move to cr to be
27689 scheduled after the stack deallocation. So say exactly where cr
27690 is located on each of the two insns. */
27692 static rtx
27693 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27695 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27696 rtx reg = gen_rtx_REG (SImode, regno);
27697 rtx_insn *insn = emit_move_insn (reg, mem);
27699 if (!exit_func && DEFAULT_ABI == ABI_V4)
27701 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27702 rtx set = gen_rtx_SET (reg, cr);
27704 add_reg_note (insn, REG_CFA_REGISTER, set);
27705 RTX_FRAME_RELATED_P (insn) = 1;
27707 return reg;
27710 /* Reload CR from REG. */
27712 static void
27713 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27715 int count = 0;
27716 int i;
27718 if (using_mfcr_multiple)
27720 for (i = 0; i < 8; i++)
27721 if (save_reg_p (CR0_REGNO + i))
27722 count++;
27723 gcc_assert (count);
27726 if (using_mfcr_multiple && count > 1)
27728 rtx_insn *insn;
27729 rtvec p;
27730 int ndx;
27732 p = rtvec_alloc (count);
27734 ndx = 0;
27735 for (i = 0; i < 8; i++)
27736 if (save_reg_p (CR0_REGNO + i))
27738 rtvec r = rtvec_alloc (2);
27739 RTVEC_ELT (r, 0) = reg;
27740 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27741 RTVEC_ELT (p, ndx) =
27742 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27743 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27744 ndx++;
27746 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27747 gcc_assert (ndx == count);
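/* Each SET above stands for one "mtcrf MASK,reg" with a single-bit
   field mask: CR field i maps to FXM bit 1 << (7 - i), since mtcrf
   numbers CR0 as the most significant mask bit.  */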
27749 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27750 CR field separately. */
27751 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27753 for (i = 0; i < 8; i++)
27754 if (save_reg_p (CR0_REGNO + i))
27755 add_reg_note (insn, REG_CFA_RESTORE,
27756 gen_rtx_REG (SImode, CR0_REGNO + i));
27758 RTX_FRAME_RELATED_P (insn) = 1;
27761 else
27762 for (i = 0; i < 8; i++)
27763 if (save_reg_p (CR0_REGNO + i))
27765 rtx insn = emit_insn (gen_movsi_to_cr_one
27766 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27768 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27769 CR field separately, attached to the insn that in fact
27770 restores this particular CR field. */
27771 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27773 add_reg_note (insn, REG_CFA_RESTORE,
27774 gen_rtx_REG (SImode, CR0_REGNO + i));
27776 RTX_FRAME_RELATED_P (insn) = 1;
27780 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27781 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27782 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27784 rtx_insn *insn = get_last_insn ();
27785 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27787 add_reg_note (insn, REG_CFA_RESTORE, cr);
27788 RTX_FRAME_RELATED_P (insn) = 1;
27792 /* Like cr, the move to lr instruction can be scheduled after the
27793 stack deallocation, but unlike cr, its stack frame save is still
27794 valid. So we only need to emit the cfa_restore on the correct
27795 instruction. */
27797 static void
27798 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27800 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27801 rtx reg = gen_rtx_REG (Pmode, regno);
27803 emit_move_insn (reg, mem);
27806 static void
27807 restore_saved_lr (int regno, bool exit_func)
27809 rtx reg = gen_rtx_REG (Pmode, regno);
27810 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27811 rtx_insn *insn = emit_move_insn (lr, reg);
27813 if (!exit_func && flag_shrink_wrap)
27815 add_reg_note (insn, REG_CFA_RESTORE, lr);
27816 RTX_FRAME_RELATED_P (insn) = 1;
27820 static rtx
27821 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27823 if (DEFAULT_ABI == ABI_ELFv2)
27825 int i;
27826 for (i = 0; i < 8; i++)
27827 if (save_reg_p (CR0_REGNO + i))
27829 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27830 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27831 cfa_restores);
27834 else if (info->cr_save_p)
27835 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27836 gen_rtx_REG (SImode, CR2_REGNO),
27837 cfa_restores);
27839 if (info->lr_save_p)
27840 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27841 gen_rtx_REG (Pmode, LR_REGNO),
27842 cfa_restores);
27843 return cfa_restores;
27846 /* Return true if OFFSET from the stack pointer can be clobbered by signals.
27847 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
27848 below the stack pointer not clobbered by signals. */
27850 static inline bool
27851 offset_below_red_zone_p (HOST_WIDE_INT offset)
27853 return offset < (DEFAULT_ABI == ABI_V4
27854 ? 0
27855 : TARGET_32BIT ? -220 : -288);
27858 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27860 static void
27861 emit_cfa_restores (rtx cfa_restores)
27863 rtx_insn *insn = get_last_insn ();
27864 rtx *loc = &REG_NOTES (insn);
27866 while (*loc)
27867 loc = &XEXP (*loc, 1);
27868 *loc = cfa_restores;
27869 RTX_FRAME_RELATED_P (insn) = 1;
27872 /* Emit function epilogue as insns. */
27874 void
27875 rs6000_emit_epilogue (int sibcall)
27877 rs6000_stack_t *info;
27878 int restoring_GPRs_inline;
27879 int restoring_FPRs_inline;
27880 int using_load_multiple;
27881 int using_mtcr_multiple;
27882 int use_backchain_to_restore_sp;
27883 int restore_lr;
27884 int strategy;
27885 HOST_WIDE_INT frame_off = 0;
27886 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27887 rtx frame_reg_rtx = sp_reg_rtx;
27888 rtx cfa_restores = NULL_RTX;
27889 rtx insn;
27890 rtx cr_save_reg = NULL_RTX;
27891 machine_mode reg_mode = Pmode;
27892 int reg_size = TARGET_32BIT ? 4 : 8;
27893 machine_mode fp_reg_mode = (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
27894 ? DFmode : SFmode;
27895 int fp_reg_size = 8;
27896 int i;
27897 bool exit_func;
27898 unsigned ptr_regno;
27900 info = rs6000_stack_info ();
27902 strategy = info->savres_strategy;
27903 using_load_multiple = strategy & REST_MULTIPLE;
27904 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27905 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27906 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
27907 || rs6000_cpu == PROCESSOR_PPC603
27908 || rs6000_cpu == PROCESSOR_PPC750
27909 || optimize_size);
27910 /* Restore via the backchain when we have a large frame, since this
27911 is more efficient than an addis, addi pair. The second condition
27912 here will not trigger at the moment; we don't actually need a
27913 frame pointer for alloca, but the generic parts of the compiler
27914 give us one anyway. */
27915 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
27916 ? info->lr_save_offset
27917 : 0) > 32767
27918 || (cfun->calls_alloca
27919 && !frame_pointer_needed));
27920 restore_lr = (info->lr_save_p
27921 && (restoring_FPRs_inline
27922 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27923 && (restoring_GPRs_inline
27924 || info->first_fp_reg_save < 64)
27925 && !cfun->machine->lr_is_wrapped_separately);
27928 if (WORLD_SAVE_P (info))
27930 int i, j;
27931 char rname[30];
27932 const char *alloc_rname;
27933 rtvec p;
27935 /* eh_rest_world_r10 will return to the location saved in the LR
27936 stack slot (which is not likely to be our caller).
27937 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27938 rest_world is similar, except any R10 parameter is ignored.
27939 The exception-handling stuff that was here in 2.95 is no
27940 longer necessary. */
27942 p = rtvec_alloc (9
27943 + 32 - info->first_gp_reg_save
27944 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27945 + 63 + 1 - info->first_fp_reg_save);
27947 strcpy (rname, ((crtl->calls_eh_return) ?
27948 "*eh_rest_world_r10" : "*rest_world"));
27949 alloc_rname = ggc_strdup (rname);
27951 j = 0;
27952 RTVEC_ELT (p, j++) = ret_rtx;
27953 RTVEC_ELT (p, j++)
27954 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
27955 /* The instruction pattern requires a clobber here;
27956 it is shared with the restVEC helper. */
27957 RTVEC_ELT (p, j++)
27958 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
27961 /* CR register traditionally saved as CR2. */
27962 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27963 RTVEC_ELT (p, j++)
27964 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27965 if (flag_shrink_wrap)
27967 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27968 gen_rtx_REG (Pmode, LR_REGNO),
27969 cfa_restores);
27970 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27974 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27976 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27977 RTVEC_ELT (p, j++)
27978 = gen_frame_load (reg,
27979 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27980 if (flag_shrink_wrap
27981 && save_reg_p (info->first_gp_reg_save + i))
27982 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27984 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27986 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27987 RTVEC_ELT (p, j++)
27988 = gen_frame_load (reg,
27989 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27990 if (flag_shrink_wrap
27991 && save_reg_p (info->first_altivec_reg_save + i))
27992 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27994 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27996 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
27997 ? DFmode : SFmode),
27998 info->first_fp_reg_save + i);
27999 RTVEC_ELT (p, j++)
28000 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28001 if (flag_shrink_wrap
28002 && save_reg_p (info->first_fp_reg_save + i))
28003 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28005 RTVEC_ELT (p, j++)
28006 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
28007 RTVEC_ELT (p, j++)
28008 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
28009 RTVEC_ELT (p, j++)
28010 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
28011 RTVEC_ELT (p, j++)
28012 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
28013 RTVEC_ELT (p, j++)
28014 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28015 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28017 if (flag_shrink_wrap)
28019 REG_NOTES (insn) = cfa_restores;
28020 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28021 RTX_FRAME_RELATED_P (insn) = 1;
28023 return;
28026 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28027 if (info->push_p)
28028 frame_off = info->total_size;
28030 /* Restore AltiVec registers if we must do so before adjusting the
28031 stack. */
28032 if (info->altivec_size != 0
28033 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28034 || (DEFAULT_ABI != ABI_V4
28035 && offset_below_red_zone_p (info->altivec_save_offset))))
28037 int i;
28038 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28040 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28041 if (use_backchain_to_restore_sp)
28043 int frame_regno = 11;
28045 if ((strategy & REST_INLINE_VRS) == 0)
28047 /* Of r11 and r12, select the one not clobbered by an
28048 out-of-line restore function for the frame register. */
28049 frame_regno = 11 + 12 - scratch_regno;
28051 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28052 emit_move_insn (frame_reg_rtx,
28053 gen_rtx_MEM (Pmode, sp_reg_rtx));
28054 frame_off = 0;
28056 else if (frame_pointer_needed)
28057 frame_reg_rtx = hard_frame_pointer_rtx;
28059 if ((strategy & REST_INLINE_VRS) == 0)
28061 int end_save = info->altivec_save_offset + info->altivec_size;
28062 int ptr_off;
28063 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28064 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28066 if (end_save + frame_off != 0)
28068 rtx offset = GEN_INT (end_save + frame_off);
28070 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28072 else
28073 emit_move_insn (ptr_reg, frame_reg_rtx);
28075 ptr_off = -end_save;
28076 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28077 info->altivec_save_offset + ptr_off,
28078 0, V4SImode, SAVRES_VR);
28080 else
28082 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28083 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28085 rtx addr, areg, mem, insn;
28086 rtx reg = gen_rtx_REG (V4SImode, i);
28087 HOST_WIDE_INT offset
28088 = (info->altivec_save_offset + frame_off
28089 + 16 * (i - info->first_altivec_reg_save));
28091 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28093 mem = gen_frame_mem (V4SImode,
28094 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28095 GEN_INT (offset)));
28096 insn = gen_rtx_SET (reg, mem);
28098 else
28100 areg = gen_rtx_REG (Pmode, 0);
28101 emit_move_insn (areg, GEN_INT (offset));
28103 /* AltiVec addressing mode is [reg+reg]. */
28104 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28105 mem = gen_frame_mem (V4SImode, addr);
28107 /* Rather than emitting a generic move, force use of the
28108 lvx instruction, which we always want. In particular we
28109 don't want lxvd2x/xxpermdi for little endian. */
28110 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28113 (void) emit_insn (insn);
28117 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28118 if (((strategy & REST_INLINE_VRS) == 0
28119 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28120 && (flag_shrink_wrap
28121 || (offset_below_red_zone_p
28122 (info->altivec_save_offset
28123 + 16 * (i - info->first_altivec_reg_save))))
28124 && save_reg_p (i))
28126 rtx reg = gen_rtx_REG (V4SImode, i);
28127 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28131 /* Restore VRSAVE if we must do so before adjusting the stack. */
28132 if (info->vrsave_size != 0
28133 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28134 || (DEFAULT_ABI != ABI_V4
28135 && offset_below_red_zone_p (info->vrsave_save_offset))))
28137 rtx reg;
28139 if (frame_reg_rtx == sp_reg_rtx)
28141 if (use_backchain_to_restore_sp)
28143 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28144 emit_move_insn (frame_reg_rtx,
28145 gen_rtx_MEM (Pmode, sp_reg_rtx));
28146 frame_off = 0;
28148 else if (frame_pointer_needed)
28149 frame_reg_rtx = hard_frame_pointer_rtx;
28152 reg = gen_rtx_REG (SImode, 12);
28153 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28154 info->vrsave_save_offset + frame_off));
28156 emit_insn (generate_set_vrsave (reg, info, 1));
28159 insn = NULL_RTX;
28160 /* If we have a large stack frame, restore the old stack pointer
28161 using the backchain. */
28162 if (use_backchain_to_restore_sp)
28164 if (frame_reg_rtx == sp_reg_rtx)
28166 /* Under V.4, don't reset the stack pointer until after we're done
28167 loading the saved registers. */
28168 if (DEFAULT_ABI == ABI_V4)
28169 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28171 insn = emit_move_insn (frame_reg_rtx,
28172 gen_rtx_MEM (Pmode, sp_reg_rtx));
28173 frame_off = 0;
28175 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28176 && DEFAULT_ABI == ABI_V4)
28177 /* frame_reg_rtx has been set up by the altivec restore. */
28179 else
28181 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28182 frame_reg_rtx = sp_reg_rtx;
28185 /* If we have a frame pointer, we can restore the old stack pointer
28186 from it. */
28187 else if (frame_pointer_needed)
28189 frame_reg_rtx = sp_reg_rtx;
28190 if (DEFAULT_ABI == ABI_V4)
28191 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28192 /* Prevent reordering memory accesses against stack pointer restore. */
28193 else if (cfun->calls_alloca
28194 || offset_below_red_zone_p (-info->total_size))
28195 rs6000_emit_stack_tie (frame_reg_rtx, true);
28197 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28198 GEN_INT (info->total_size)));
28199 frame_off = 0;
28201 else if (info->push_p
28202 && DEFAULT_ABI != ABI_V4
28203 && !crtl->calls_eh_return)
28205 /* Prevent reordering memory accesses against stack pointer restore. */
28206 if (cfun->calls_alloca
28207 || offset_below_red_zone_p (-info->total_size))
28208 rs6000_emit_stack_tie (frame_reg_rtx, false);
28209 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28210 GEN_INT (info->total_size)));
28211 frame_off = 0;
28213 if (insn && frame_reg_rtx == sp_reg_rtx)
28215 if (cfa_restores)
28217 REG_NOTES (insn) = cfa_restores;
28218 cfa_restores = NULL_RTX;
28220 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28221 RTX_FRAME_RELATED_P (insn) = 1;
28224 /* Restore AltiVec registers if we have not done so already. */
28225 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28226 && info->altivec_size != 0
28227 && (DEFAULT_ABI == ABI_V4
28228 || !offset_below_red_zone_p (info->altivec_save_offset)))
28230 int i;
28232 if ((strategy & REST_INLINE_VRS) == 0)
28234 int end_save = info->altivec_save_offset + info->altivec_size;
28235 int ptr_off;
28236 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28237 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28238 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28240 if (end_save + frame_off != 0)
28242 rtx offset = GEN_INT (end_save + frame_off);
28244 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28246 else
28247 emit_move_insn (ptr_reg, frame_reg_rtx);
28249 ptr_off = -end_save;
28250 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28251 info->altivec_save_offset + ptr_off,
28252 0, V4SImode, SAVRES_VR);
28253 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28255 /* Frame reg was clobbered by out-of-line save. Restore it
28256 from ptr_reg, and if we are calling out-of-line gpr or
28257 fpr restore, set up the correct pointer and offset. */
28258 unsigned newptr_regno = 1;
28259 if (!restoring_GPRs_inline)
28261 bool lr = info->gp_save_offset + info->gp_size == 0;
28262 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28263 newptr_regno = ptr_regno_for_savres (sel);
28264 end_save = info->gp_save_offset + info->gp_size;
28266 else if (!restoring_FPRs_inline)
28268 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28269 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28270 newptr_regno = ptr_regno_for_savres (sel);
28271 end_save = info->fp_save_offset + info->fp_size;
28274 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28275 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28277 if (end_save + ptr_off != 0)
28279 rtx offset = GEN_INT (end_save + ptr_off);
28281 frame_off = -end_save;
28282 if (TARGET_32BIT)
28283 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28284 ptr_reg, offset));
28285 else
28286 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28287 ptr_reg, offset));
28289 else
28291 frame_off = ptr_off;
28292 emit_move_insn (frame_reg_rtx, ptr_reg);
28296 else
28298 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28299 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28301 rtx addr, areg, mem, insn;
28302 rtx reg = gen_rtx_REG (V4SImode, i);
28303 HOST_WIDE_INT offset
28304 = (info->altivec_save_offset + frame_off
28305 + 16 * (i - info->first_altivec_reg_save));
28307 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28309 mem = gen_frame_mem (V4SImode,
28310 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28311 GEN_INT (offset)));
28312 insn = gen_rtx_SET (reg, mem);
28314 else
28316 areg = gen_rtx_REG (Pmode, 0);
28317 emit_move_insn (areg, GEN_INT (offset));
28319 /* AltiVec addressing mode is [reg+reg]. */
28320 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28321 mem = gen_frame_mem (V4SImode, addr);
28323 /* Rather than emitting a generic move, force use of the
28324 lvx instruction, which we always want. In particular we
28325 don't want lxvd2x/xxpermdi for little endian. */
28326 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28329 (void) emit_insn (insn);
28333 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28334 if (((strategy & REST_INLINE_VRS) == 0
28335 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28336 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28337 && save_reg_p (i))
28339 rtx reg = gen_rtx_REG (V4SImode, i);
28340 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28344 /* Restore VRSAVE if we have not done so already. */
28345 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28346 && info->vrsave_size != 0
28347 && (DEFAULT_ABI == ABI_V4
28348 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28350 rtx reg;
28352 reg = gen_rtx_REG (SImode, 12);
28353 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28354 info->vrsave_save_offset + frame_off));
28356 emit_insn (generate_set_vrsave (reg, info, 1));
28359 /* If we exit by an out-of-line restore function on ABI_V4 then that
28360 function will deallocate the stack, so we don't need to worry
28361 about the unwinder restoring cr from an invalid stack frame
28362 location. */
28363 exit_func = (!restoring_FPRs_inline
28364 || (!restoring_GPRs_inline
28365 && info->first_fp_reg_save == 64));
28367 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28368 *separate* slots if the routine calls __builtin_eh_return, so
28369 that they can be independently restored by the unwinder. */
28370 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28372 int i, cr_off = info->ehcr_offset;
28374 for (i = 0; i < 8; i++)
28375 if (!call_used_regs[CR0_REGNO + i])
28377 rtx reg = gen_rtx_REG (SImode, 0);
28378 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28379 cr_off + frame_off));
28381 insn = emit_insn (gen_movsi_to_cr_one
28382 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28384 if (!exit_func && flag_shrink_wrap)
28386 add_reg_note (insn, REG_CFA_RESTORE,
28387 gen_rtx_REG (SImode, CR0_REGNO + i));
28389 RTX_FRAME_RELATED_P (insn) = 1;
28392 cr_off += reg_size;
28396 /* Get the old lr if we saved it. If we are restoring registers
28397 out-of-line, then the out-of-line routines can do this for us. */
28398 if (restore_lr && restoring_GPRs_inline)
28399 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28401 /* Get the old cr if we saved it. */
28402 if (info->cr_save_p)
28404 unsigned cr_save_regno = 12;
28406 if (!restoring_GPRs_inline)
28408 /* Ensure we don't use the register used by the out-of-line
28409 gpr register restore below. */
28410 bool lr = info->gp_save_offset + info->gp_size == 0;
28411 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28412 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28414 if (gpr_ptr_regno == 12)
28415 cr_save_regno = 11;
28416 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28418 else if (REGNO (frame_reg_rtx) == 12)
28419 cr_save_regno = 11;
28421 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28422 info->cr_save_offset + frame_off,
28423 exit_func);
28426 /* Set LR here to try to overlap restores below. */
28427 if (restore_lr && restoring_GPRs_inline)
28428 restore_saved_lr (0, exit_func);
28430 /* Load exception handler data registers, if needed. */
28431 if (crtl->calls_eh_return)
28433 unsigned int i, regno;
28435 if (TARGET_AIX)
28437 rtx reg = gen_rtx_REG (reg_mode, 2);
28438 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28439 frame_off + RS6000_TOC_SAVE_SLOT));
28442 for (i = 0; ; ++i)
28444 rtx mem;
28446 regno = EH_RETURN_DATA_REGNO (i);
28447 if (regno == INVALID_REGNUM)
28448 break;
28450 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28451 info->ehrd_offset + frame_off
28452 + reg_size * (int) i);
28454 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28458 /* Restore GPRs. This is done as a PARALLEL if we are using
28459 the load-multiple instructions. */
28460 if (!restoring_GPRs_inline)
28462 /* We are jumping to an out-of-line function. */
28463 rtx ptr_reg;
28464 int end_save = info->gp_save_offset + info->gp_size;
28465 bool can_use_exit = end_save == 0;
28466 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28467 int ptr_off;
28469 /* Emit stack reset code if we need it. */
28470 ptr_regno = ptr_regno_for_savres (sel);
28471 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28472 if (can_use_exit)
28473 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28474 else if (end_save + frame_off != 0)
28475 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28476 GEN_INT (end_save + frame_off)));
28477 else if (REGNO (frame_reg_rtx) != ptr_regno)
28478 emit_move_insn (ptr_reg, frame_reg_rtx);
28479 if (REGNO (frame_reg_rtx) == ptr_regno)
28480 frame_off = -end_save;
28482 if (can_use_exit && info->cr_save_p)
28483 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28485 ptr_off = -end_save;
28486 rs6000_emit_savres_rtx (info, ptr_reg,
28487 info->gp_save_offset + ptr_off,
28488 info->lr_save_offset + ptr_off,
28489 reg_mode, sel);
28491 else if (using_load_multiple)
28493 rtvec p;
28494 p = rtvec_alloc (32 - info->first_gp_reg_save);
28495 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28496 RTVEC_ELT (p, i)
28497 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28498 frame_reg_rtx,
28499 info->gp_save_offset + frame_off + reg_size * i);
28500 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
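/* As in the prologue, this PARALLEL is the 32-bit load-multiple form,
   typically a single "lmw first_gp_reg_save,offset(frame)" restoring
   r(first_gp_reg_save)..r31 at once.  */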
28502 else
28504 int offset = info->gp_save_offset + frame_off;
28505 for (i = info->first_gp_reg_save; i < 32; i++)
28507 if (save_reg_p (i)
28508 && !cfun->machine->gpr_is_wrapped_separately[i])
28510 rtx reg = gen_rtx_REG (reg_mode, i);
28511 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28514 offset += reg_size;
28518 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28520 /* If the frame pointer was used then we can't delay emitting
28521 a REG_CFA_DEF_CFA note. This must happen on the insn that
28522 restores the frame pointer, r31. We may have already emitted
28523 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28524 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28525 be harmless if emitted. */
28526 if (frame_pointer_needed)
28528 insn = get_last_insn ();
28529 add_reg_note (insn, REG_CFA_DEF_CFA,
28530 plus_constant (Pmode, frame_reg_rtx, frame_off));
28531 RTX_FRAME_RELATED_P (insn) = 1;
28534 /* Set up cfa_restores. We always need these when
28535 shrink-wrapping. If not shrink-wrapping then we only need
28536 the cfa_restore when the stack location is no longer valid.
28537 The cfa_restores must be emitted on or before the insn that
28538 invalidates the stack, and of course must not be emitted
28539 before the insn that actually does the restore. The latter
28540 is why it is a bad idea to emit the cfa_restores as a group
28541 on the last instruction here that actually does a restore:
28542 that insn may be reordered with respect to others doing
28543 restores. */
28544 if (flag_shrink_wrap
28545 && !restoring_GPRs_inline
28546 && info->first_fp_reg_save == 64)
28547 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28549 for (i = info->first_gp_reg_save; i < 32; i++)
28550 if (save_reg_p (i)
28551 && !cfun->machine->gpr_is_wrapped_separately[i])
28553 rtx reg = gen_rtx_REG (reg_mode, i);
28554 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28558 if (!restoring_GPRs_inline
28559 && info->first_fp_reg_save == 64)
28561 /* We are jumping to an out-of-line function. */
28562 if (cfa_restores)
28563 emit_cfa_restores (cfa_restores);
28564 return;
28567 if (restore_lr && !restoring_GPRs_inline)
28569 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28570 restore_saved_lr (0, exit_func);
28573 /* Restore fpr's if we need to do it without calling a function. */
28574 if (restoring_FPRs_inline)
28576 int offset = info->fp_save_offset + frame_off;
28577 for (i = info->first_fp_reg_save; i < 64; i++)
28579 if (save_reg_p (i)
28580 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28582 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28583 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28584 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28585 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28586 cfa_restores);
28589 offset += fp_reg_size;
28593 /* If we saved cr, restore it here. Just those that were used. */
28594 if (info->cr_save_p)
28595 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28597 /* If this is V.4, unwind the stack pointer after all of the loads
28598 have been done, or set up r11 if we are restoring fp out of line. */
28599 ptr_regno = 1;
28600 if (!restoring_FPRs_inline)
28602 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28603 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28604 ptr_regno = ptr_regno_for_savres (sel);
28607 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28608 if (REGNO (frame_reg_rtx) == ptr_regno)
28609 frame_off = 0;
28611 if (insn && restoring_FPRs_inline)
28613 if (cfa_restores)
28615 REG_NOTES (insn) = cfa_restores;
28616 cfa_restores = NULL_RTX;
28618 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28619 RTX_FRAME_RELATED_P (insn) = 1;
28622 if (crtl->calls_eh_return)
28624 rtx sa = EH_RETURN_STACKADJ_RTX;
28625 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
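/* EH_RETURN_STACKADJ_RTX is r10 on this port (hence the name
   *eh_rest_world_r10 above); __builtin_eh_return places the extra
   stack adjustment there.  */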
28628 if (!sibcall && restoring_FPRs_inline)
28630 if (cfa_restores)
28632 /* We can't hang the cfa_restores off a simple return,
28633 since the shrink-wrap code sometimes uses an existing
28634 return. This means there might be a path from
28635 pre-prologue code to this return, and dwarf2cfi code
28636 wants the eh_frame unwinder state to be the same on
28637 all paths to any point. So we need to emit the
28638 cfa_restores before the return. For -m64 we really
28639 don't need epilogue cfa_restores at all, except for
28640 this irritating dwarf2cfi with shrink-wrap
28641 requirement; the stack red-zone means eh_frame info
28642 from the prologue telling the unwinder to restore
28643 from the stack is perfectly good right to the end of
28644 the function. */
28645 emit_insn (gen_blockage ());
28646 emit_cfa_restores (cfa_restores);
28647 cfa_restores = NULL_RTX;
28650 emit_jump_insn (targetm.gen_simple_return ());
28653 if (!sibcall && !restoring_FPRs_inline)
28655 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28656 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28657 int elt = 0;
28658 RTVEC_ELT (p, elt++) = ret_rtx;
28659 if (lr)
28660 RTVEC_ELT (p, elt++)
28661 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
28663 /* We have to restore more than two FP registers, so branch to the
28664 restore function. It will return to our caller. */
28665 int i;
28666 int reg;
28667 rtx sym;
28669 if (flag_shrink_wrap)
28670 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28672 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28673 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28674 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28675 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28677 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28679 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28681 RTVEC_ELT (p, elt++)
28682 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28683 if (flag_shrink_wrap
28684 && save_reg_p (info->first_fp_reg_save + i))
28685 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28688 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28691 if (cfa_restores)
28693 if (sibcall)
28694 /* Ensure the cfa_restores are hung off an insn that won't
28695 be reordered above other restores. */
28696 emit_insn (gen_blockage ());
28698 emit_cfa_restores (cfa_restores);
28702 /* Write function epilogue. */
28704 static void
28705 rs6000_output_function_epilogue (FILE *file)
28707 #if TARGET_MACHO
28708 macho_branch_islands ();
28711 rtx_insn *insn = get_last_insn ();
28712 rtx_insn *deleted_debug_label = NULL;
28714 /* Mach-O doesn't support labels at the end of objects, so if
28715 it looks like we might want one, take special action.
28717 First, collect any sequence of deleted debug labels. */
28718 while (insn
28719 && NOTE_P (insn)
28720 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28722 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL notes;
28723 instead set their CODE_LABEL_NUMBER to -1. Otherwise there
28724 would be code generation differences between
28725 -g and -g0. */
28726 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28727 deleted_debug_label = insn;
28728 insn = PREV_INSN (insn);
28731 /* Second, if we have:
28732 label:
28733 barrier
28734 then this needs to be detected, so skip past the barrier. */
28736 if (insn && BARRIER_P (insn))
28737 insn = PREV_INSN (insn);
28739 /* Up to now we've only seen notes or barriers. */
28740 if (insn)
28742 if (LABEL_P (insn)
28743 || (NOTE_P (insn)
28744 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28745 /* Trailing label: <barrier>. */
28746 fputs ("\tnop\n", file);
28747 else
28749 /* Lastly, see if we have a completely empty function body. */
28750 while (insn && ! INSN_P (insn))
28751 insn = PREV_INSN (insn);
28752 /* If we don't find any insns, we've got an empty function body,
28753 i.e. completely empty - without a return or branch. This is
28754 taken as the case where a function body has been removed
28755 because it contains an inline __builtin_unreachable(). GCC
28756 states that reaching __builtin_unreachable() means undefined
28757 behavior, so we're not obliged to do anything special; however,
28758 we want non-zero-sized function bodies. To meet this, and help
28759 the user out, let's trap the case. */
28760 if (insn == NULL)
28761 fputs ("\ttrap\n", file);
28764 else if (deleted_debug_label)
28765 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28766 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28767 CODE_LABEL_NUMBER (insn) = -1;
28769 #endif
28771 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28772 on its format.
28774 We don't output a traceback table if -finhibit-size-directive was
28775 used. The documentation for -finhibit-size-directive reads
28776 ``don't output a @code{.size} assembler directive, or anything
28777 else that would cause trouble if the function is split in the
28778 middle, and the two halves are placed at locations far apart in
28779 memory.'' The traceback table has this property, since it
28780 includes the offset from the start of the function to the
28781 traceback table itself.
28783 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28784 different traceback table. */
28785 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28786 && ! flag_inhibit_size_directive
28787 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28789 const char *fname = NULL;
28790 const char *language_string = lang_hooks.name;
28791 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28792 int i;
28793 int optional_tbtab;
28794 rs6000_stack_t *info = rs6000_stack_info ();
28796 if (rs6000_traceback == traceback_full)
28797 optional_tbtab = 1;
28798 else if (rs6000_traceback == traceback_part)
28799 optional_tbtab = 0;
28800 else
28801 optional_tbtab = !optimize_size && !TARGET_ELF;
28803 if (optional_tbtab)
28805 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28806 while (*fname == '.') /* V.4 encodes . in the name */
28807 fname++;
28809 /* Need label immediately before tbtab, so we can compute
28810 its offset from the function start. */
28811 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28812 ASM_OUTPUT_LABEL (file, fname);
28815 /* The .tbtab pseudo-op can only be used for the first eight
28816 expressions, since it can't handle the possibly variable
28817 length fields that follow. However, if you omit the optional
28818 fields, the assembler outputs zeros for all optional fields
28819 anyway, giving each variable length field its minimum length
28820 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28821 pseudo-op at all. */
28823 /* An all-zero word flags the start of the tbtab, for debuggers
28824 that have to find it by searching forward from the entry
28825 point or from the current pc. */
28826 fputs ("\t.long 0\n", file);
28828 /* Tbtab format type. Use format type 0. */
28829 fputs ("\t.byte 0,", file);
28831 /* Language type. Unfortunately, there does not seem to be any
28832 official way to discover the language being compiled, so we
28833 use language_string.
28834 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
28835 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28836 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
28837 either, so for now use 0. */
28838 if (lang_GNU_C ()
28839 || ! strcmp (language_string, "GNU GIMPLE")
28840 || ! strcmp (language_string, "GNU Go")
28841 || ! strcmp (language_string, "libgccjit"))
28842 i = 0;
28843 else if (! strcmp (language_string, "GNU F77")
28844 || lang_GNU_Fortran ())
28845 i = 1;
28846 else if (! strcmp (language_string, "GNU Pascal"))
28847 i = 2;
28848 else if (! strcmp (language_string, "GNU Ada"))
28849 i = 3;
28850 else if (lang_GNU_CXX ()
28851 || ! strcmp (language_string, "GNU Objective-C++"))
28852 i = 9;
28853 else if (! strcmp (language_string, "GNU Java"))
28854 i = 13;
28855 else if (! strcmp (language_string, "GNU Objective-C"))
28856 i = 14;
28857 else
28858 gcc_unreachable ();
28859 fprintf (file, "%d,", i);
28861 /* 8 single bit fields: global linkage (not set for C extern linkage,
28862 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28863 from start of procedure stored in tbtab, internal function, function
28864 has controlled storage, function has no toc, function uses fp,
28865 function logs/aborts fp operations. */
28866 /* Assume that fp operations are used if any fp reg must be saved. */
28867 fprintf (file, "%d,",
28868 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28870 /* 6 bitfields: function is interrupt handler, name present in
28871 proc table, function calls alloca, on condition directives
28872 (controls stack walks, 3 bits), saves condition reg, saves
28873 link reg. */
28874 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28875 set up as a frame pointer, even when there is no alloca call. */
28876 fprintf (file, "%d,",
28877 ((optional_tbtab << 6)
28878 | ((optional_tbtab & frame_pointer_needed) << 5)
28879 | (info->cr_save_p << 1)
28880 | (info->lr_save_p)));
28882 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28883 (6 bits). */
28884 fprintf (file, "%d,",
28885 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28887 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28888 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28890 if (optional_tbtab)
28892 /* Compute the parameter info from the function decl argument
28893 list. */
28894 tree decl;
28895 int next_parm_info_bit = 31;
28897 for (decl = DECL_ARGUMENTS (current_function_decl);
28898 decl; decl = DECL_CHAIN (decl))
28900 rtx parameter = DECL_INCOMING_RTL (decl);
28901 machine_mode mode = GET_MODE (parameter);
28903 if (GET_CODE (parameter) == REG)
28905 if (SCALAR_FLOAT_MODE_P (mode))
28907 int bits;
28909 float_parms++;
28911 switch (mode)
28913 case E_SFmode:
28914 case E_SDmode:
28915 bits = 0x2;
28916 break;
28918 case E_DFmode:
28919 case E_DDmode:
28920 case E_TFmode:
28921 case E_TDmode:
28922 case E_IFmode:
28923 case E_KFmode:
28924 bits = 0x3;
28925 break;
28927 default:
28928 gcc_unreachable ();
28931 /* If only one bit will fit, don't OR in this entry. */
28932 if (next_parm_info_bit > 0)
28933 parm_info |= (bits << (next_parm_info_bit - 1));
28934 next_parm_info_bit -= 2;
28936 else
28938 fixed_parms += ((GET_MODE_SIZE (mode)
28939 + (UNITS_PER_WORD - 1))
28940 / UNITS_PER_WORD);
28941 next_parm_info_bit -= 1;
28947 /* Number of fixed point parameters. */
28948 /* This is actually the number of words of fixed point parameters; thus
28949 an 8-byte struct counts as 2, and thus the maximum value is 8. */
28950 fprintf (file, "%d,", fixed_parms);
28952 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28953 all on stack. */
28954 /* This is actually the number of fp registers that hold parameters;
28955 and thus the maximum value is 13. */
28956 /* Set the parameters-on-stack bit if parameters are not in their
28957 original registers, regardless of whether they are on the stack?
28958 Xlc seems to set the bit when not optimizing. */
28959 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28961 if (optional_tbtab)
28963 /* Optional fields follow. Some are variable length. */
28965 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28966 float, 11 double float. */
28967 /* There is an entry for each parameter in a register, in the order
28968 that they occur in the parameter list. Any intervening arguments
28969 on the stack are ignored. If the list overflows a long (max
28970 possible length 34 bits) then completely leave off all elements
28971 that don't fit. */
28972 /* Only emit this long if there was at least one parameter. */
28973 if (fixed_parms || float_parms)
28974 fprintf (file, "\t.long %d\n", parm_info);
28976 /* Offset from start of code to tb table. */
28977 fputs ("\t.long ", file);
28978 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28979 RS6000_OUTPUT_BASENAME (file, fname);
28980 putc ('-', file);
28981 rs6000_output_function_entry (file, fname);
28982 putc ('\n', file);
28984 /* Interrupt handler mask. */
28985 /* Omit this long, since we never set the interrupt handler bit
28986 above. */
28988 /* Number of CTL (controlled storage) anchors. */
28989 /* Omit this long, since the has_ctl bit is never set above. */
28991 /* Displacement into stack of each CTL anchor. */
28992 /* Omit this list of longs, because there are no CTL anchors. */
28994 /* Length of function name. */
28995 if (*fname == '*')
28996 ++fname;
28997 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28999 /* Function name. */
29000 assemble_string (fname, strlen (fname));
29002 /* Register for alloca automatic storage; this is always reg 31.
29003 Only emit this if the alloca bit was set above. */
29004 if (frame_pointer_needed)
29005 fputs ("\t.byte 31\n", file);
29007 fputs ("\t.align 2\n", file);
29011 /* Arrange to define .LCTOC1 label, if not already done. */
29012 if (need_toc_init)
29014 need_toc_init = 0;
29015 if (!toc_initialized)
29017 switch_to_section (toc_section);
29018 switch_to_section (current_function_section ());
29023 /* -fsplit-stack support. */
29025 /* A SYMBOL_REF for __morestack. */
29026 static GTY(()) rtx morestack_ref;
29028 static rtx
29029 gen_add3_const (rtx rt, rtx ra, long c)
29031 if (TARGET_64BIT)
29032 return gen_adddi3 (rt, ra, GEN_INT (c));
29033 else
29034 return gen_addsi3 (rt, ra, GEN_INT (c));
29037 /* Emit -fsplit-stack prologue, which goes before the regular function
29038 prologue (at local entry point in the case of ELFv2). */
29040 void
29041 rs6000_expand_split_stack_prologue (void)
29043 rs6000_stack_t *info = rs6000_stack_info ();
29044 unsigned HOST_WIDE_INT allocate;
29045 long alloc_hi, alloc_lo;
29046 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29047 rtx_insn *insn;
29049 gcc_assert (flag_split_stack && reload_completed);
29051 if (!info->push_p)
29052 return;
29054 if (global_regs[29])
29056 error ("%qs uses register r29", "-fsplit-stack");
29057 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29058 "conflicts with %qD", global_regs_decl[29]);
29061 allocate = info->total_size;
29062 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29064 sorry ("stack frame larger than 2G is not supported for -fsplit-stack");
29065 return;
29067 if (morestack_ref == NULL_RTX)
29069 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29070 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29071 | SYMBOL_FLAG_FUNCTION);
29074 r0 = gen_rtx_REG (Pmode, 0);
29075 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29076 r12 = gen_rtx_REG (Pmode, 12);
29077 emit_insn (gen_load_split_stack_limit (r0));
29078 /* Always emit two insns here to calculate the requested stack,
29079 so that the linker can edit them when adjusting size for calling
29080 non-split-stack code. */
29081 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29082 alloc_lo = -allocate - alloc_hi;
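/* A worked example of the split, with an assumed frame size: for
   allocate == 0x12345 we get alloc_hi == -0x10000 and alloc_lo == -0x2345
   (-9029), i.e. roughly "addis r12,r1,-1; addi r12,r12,-9029".  By
   construction alloc_lo always falls in [-0x8000, 0x8000), so it fits
   the 16-bit addi immediate field.  */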
29083 if (alloc_hi != 0)
29085 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29086 if (alloc_lo != 0)
29087 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29088 else
29089 emit_insn (gen_nop ());
29091 else
29093 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29094 emit_insn (gen_nop ());
29097 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29098 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29099 ok_label = gen_label_rtx ();
29100 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29101 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29102 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29103 pc_rtx);
29104 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29105 JUMP_LABEL (insn) = ok_label;
29106 /* Mark the jump as very likely to be taken. */
29107 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29109 lr = gen_rtx_REG (Pmode, LR_REGNO);
29110 insn = emit_move_insn (r0, lr);
29111 RTX_FRAME_RELATED_P (insn) = 1;
29112 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29113 RTX_FRAME_RELATED_P (insn) = 1;
29115 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29116 const0_rtx, const0_rtx));
29117 call_fusage = NULL_RTX;
29118 use_reg (&call_fusage, r12);
29119 /* Say the call uses r0, even though it doesn't, to stop regrename
29120 from twiddling with the insns saving lr, trashing args for cfun.
29121 The insns restoring lr are similarly protected by making
29122 split_stack_return use r0. */
29123 use_reg (&call_fusage, r0);
29124 add_function_usage_to (insn, call_fusage);
29125 /* Indicate that this function can't jump to non-local gotos. */
29126 make_reg_eh_region_note_nothrow_nononlocal (insn);
29127 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29128 insn = emit_move_insn (lr, r0);
29129 add_reg_note (insn, REG_CFA_RESTORE, lr);
29130 RTX_FRAME_RELATED_P (insn) = 1;
29131 emit_insn (gen_split_stack_return ());
29133 emit_label (ok_label);
29134 LABEL_NUSES (ok_label) = 1;
29137 /* Return the internal arg pointer used for function incoming
29138 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29139 to copy it to a pseudo in order for it to be preserved over calls
29140 and suchlike. We'd really like to use a pseudo here for the
29141 internal arg pointer but data-flow analysis is not prepared to
29142 accept pseudos as live at the beginning of a function. */
29144 static rtx
29145 rs6000_internal_arg_pointer (void)
29147 if (flag_split_stack
29148 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29149 == NULL))
29152 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29154 rtx pat;
29156 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29157 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29159 /* Put the pseudo initialization right after the note at the
29160 beginning of the function. */
29161 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29162 gen_rtx_REG (Pmode, 12));
29163 push_topmost_sequence ();
29164 emit_insn_after (pat, get_insns ());
29165 pop_topmost_sequence ();
29167 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29168 FIRST_PARM_OFFSET (current_function_decl));
29170 return virtual_incoming_args_rtx;
29173 /* We may have to tell the dataflow pass that the split stack prologue
29174 is initializing a register. */
29176 static void
29177 rs6000_live_on_entry (bitmap regs)
29179 if (flag_split_stack)
29180 bitmap_set_bit (regs, 12);
29183 /* Emit -fsplit-stack dynamic stack allocation space check. */
29185 void
29186 rs6000_split_stack_space_check (rtx size, rtx label)
29188 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29189 rtx limit = gen_reg_rtx (Pmode);
29190 rtx requested = gen_reg_rtx (Pmode);
29191 rtx cmp = gen_reg_rtx (CCUNSmode);
29192 rtx jump;
29194 emit_insn (gen_load_split_stack_limit (limit));
29195 if (CONST_INT_P (size))
29196 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29197 else
29199 size = force_reg (Pmode, size);
29200 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29202 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29203 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29204 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29205 gen_rtx_LABEL_REF (VOIDmode, label),
29206 pc_rtx);
29207 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29208 JUMP_LABEL (jump) = label;
29211 /* A C compound statement that outputs the assembler code for a thunk
29212 function, used to implement C++ virtual function calls with
29213 multiple inheritance. The thunk acts as a wrapper around a virtual
29214 function, adjusting the implicit object parameter before handing
29215 control off to the real function.
29217 First, emit code to add the integer DELTA to the location that
29218 contains the incoming first argument. Assume that this argument
29219 contains a pointer, and is the one used to pass the `this' pointer
29220 in C++. This is the incoming argument *before* the function
29221 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29222 values of all other incoming arguments.
29224 After the addition, emit code to jump to FUNCTION, which is a
29225 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29226 not touch the return address. Hence returning from FUNCTION will
29227 return to whoever called the current `thunk'.
29229 The effect must be as if FUNCTION had been called directly with the
29230 adjusted first argument. This macro is responsible for emitting
29231 all of the code for a thunk function; output_function_prologue()
29232 and output_function_epilogue() are not invoked.
29234 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29235 been extracted from it.) It might possibly be useful on some
29236 targets, but probably not.
29238 If you do not define this macro, the target-independent code in the
29239 C++ frontend will generate a less efficient heavyweight thunk that
29240 calls FUNCTION instead of jumping to it. The generic approach does
29241 not support varargs. */
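/* As a rough sketch (pseudo-code only, not what is literally emitted),
   the thunk built below behaves like:

	ret thunk (void *this, ...)
	{
	  this += delta;
	  if (vcall_offset)
	    this += *(ptrdiff_t *) (*(void **) this + vcall_offset);
	  goto function (this, ...);	/- tail jump, no new frame -/
	}
*/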
29243 static void
29244 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29245 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29246 tree function)
29248 rtx this_rtx, funexp;
29249 rtx_insn *insn;
29251 reload_completed = 1;
29252 epilogue_completed = 1;
29254 /* Mark the end of the (empty) prologue. */
29255 emit_note (NOTE_INSN_PROLOGUE_END);
29257 /* Find the "this" pointer. If the function returns a structure,
29258 the structure return pointer is in r3. */
29259 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29260 this_rtx = gen_rtx_REG (Pmode, 4);
29261 else
29262 this_rtx = gen_rtx_REG (Pmode, 3);
29264 /* Apply the constant offset, if required. */
29265 if (delta)
29266 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29268 /* Apply the offset from the vtable, if required. */
29269 if (vcall_offset)
29271 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29272 rtx tmp = gen_rtx_REG (Pmode, 12);
29274 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29275 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29277 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29278 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29280 else
29282 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29284 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29286 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29289 /* Generate a tail call to the target function. */
29290 if (!TREE_USED (function))
29292 assemble_external (function);
29293 TREE_USED (function) = 1;
29295 funexp = XEXP (DECL_RTL (function), 0);
29296 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29298 #if TARGET_MACHO
29299 if (MACHOPIC_INDIRECT)
29300 funexp = machopic_indirect_call_target (funexp);
29301 #endif
29303 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29304 generate sibcall RTL explicitly. */
29305 insn = emit_call_insn (
29306 gen_rtx_PARALLEL (VOIDmode,
29307 gen_rtvec (3,
29308 gen_rtx_CALL (VOIDmode,
29309 funexp, const0_rtx),
29310 gen_rtx_USE (VOIDmode, const0_rtx),
29311 simple_return_rtx)));
29312 SIBLING_CALL_P (insn) = 1;
29313 emit_barrier ();
29315 /* Run just enough of rest_of_compilation to get the insns emitted.
29316 There's not really enough bulk here to make other passes such as
30317 instruction scheduling worthwhile. Note that use_thunk calls
29318 assemble_start_function and assemble_end_function. */
29319 insn = get_insns ();
29320 shorten_branches (insn);
29321 final_start_function (insn, file, 1);
29322 final (insn, file, 1);
29323 final_end_function ();
29325 reload_completed = 0;
29326 epilogue_completed = 0;
29329 /* A quick summary of the various types of 'constant-pool tables'
29330 under PowerPC:
29332 Target      Flags             Name             One table per
29333 AIX         (none)            AIX TOC          object file
29334 AIX         -mfull-toc        AIX TOC          object file
29335 AIX         -mminimal-toc     AIX minimal TOC  translation unit
29336 SVR4/EABI   (none)            SVR4 SDATA       object file
29337 SVR4/EABI   -fpic             SVR4 pic         object file
29338 SVR4/EABI   -fPIC             SVR4 PIC         translation unit
29339 SVR4/EABI   -mrelocatable     EABI TOC         function
29340 SVR4/EABI   -maix             AIX TOC          object file
29341 SVR4/EABI   -maix -mminimal-toc
29342                               AIX minimal TOC  translation unit
29344 Name             Reg.  Set by  entries  contains:
29345                                made by  addrs?   fp?      sum?
29347 AIX TOC          2     crt0    as       Y        option   option
29348 AIX minimal TOC  30    prolog  gcc      Y        Y        option
29349 SVR4 SDATA       13    crt0    gcc      N        Y        N
29350 SVR4 pic         30    prolog  ld       Y        not yet  N
29351 SVR4 PIC         30    prolog  gcc      Y        option   option
29352 EABI TOC         30    prolog  gcc      Y        option   option
29356 /* Hash functions for the hash table. */
29358 static unsigned
29359 rs6000_hash_constant (rtx k)
29361 enum rtx_code code = GET_CODE (k);
29362 machine_mode mode = GET_MODE (k);
29363 unsigned result = (code << 3) ^ mode;
29364 const char *format;
29365 int flen, fidx;
29367 format = GET_RTX_FORMAT (code);
29368 flen = strlen (format);
29369 fidx = 0;
29371 switch (code)
29373 case LABEL_REF:
29374 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29376 case CONST_WIDE_INT:
29378 int i;
29379 flen = CONST_WIDE_INT_NUNITS (k);
29380 for (i = 0; i < flen; i++)
29381 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29382 return result;
29385 case CONST_DOUBLE:
29386 if (mode != VOIDmode)
29387 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29388 flen = 2;
29389 break;
29391 case CODE_LABEL:
29392 fidx = 3;
29393 break;
29395 default:
29396 break;
29399 for (; fidx < flen; fidx++)
29400 switch (format[fidx])
29402 case 's':
29404 unsigned i, len;
29405 const char *str = XSTR (k, fidx);
29406 len = strlen (str);
29407 result = result * 613 + len;
29408 for (i = 0; i < len; i++)
29409 result = result * 613 + (unsigned) str[i];
29410 break;
29412 case 'u':
29413 case 'e':
29414 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29415 break;
29416 case 'i':
29417 case 'n':
29418 result = result * 613 + (unsigned) XINT (k, fidx);
29419 break;
29420 case 'w':
29421 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29422 result = result * 613 + (unsigned) XWINT (k, fidx);
29423 else
29425 size_t i;
29426 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29427 result = result * 613 + (unsigned) (XWINT (k, fidx)
29428 >> CHAR_BIT * i);
29430 break;
29431 case '0':
29432 break;
29433 default:
29434 gcc_unreachable ();
29437 return result;
29440 hashval_t
29441 toc_hasher::hash (toc_hash_struct *thc)
29443 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29446 /* Compare H1 and H2 for equivalence. */
29448 bool
29449 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29451 rtx r1 = h1->key;
29452 rtx r2 = h2->key;
29454 if (h1->key_mode != h2->key_mode)
29455 return 0;
29457 return rtx_equal_p (r1, r2);
29460 /* These are the names given by the C++ front-end to vtables, and
29461 vtable-like objects. Ideally, this logic should not be here;
29462 instead, there should be some programmatic way of inquiring as
29463 to whether or not an object is a vtable. */
29465 #define VTABLE_NAME_P(NAME) \
29466 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29467 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29468 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29469 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29470 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
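/* For example, under the Itanium C++ ABI mangling, "_ZTV4Base" is the
   vtable for a class Base, "_ZTI4Base" its typeinfo and "_ZTT4Base" its
   VTT; "_ZTC" marks construction vtables, and "_vt." is the old g++ v2
   vtable prefix.  (The class name is illustrative.)  */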
29472 #ifdef NO_DOLLAR_IN_LABEL
29473 /* Return a GGC-allocated character string translating dollar signs in
29474 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
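/* For instance (illustrative inputs): "foo$bar$baz" comes back as
   "foo_bar_baz", while a leading dollar sign as in "$foo" is left
   untouched because of the q == name check below.  */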
29476 const char *
29477 rs6000_xcoff_strip_dollar (const char *name)
29479 char *strip, *p;
29480 const char *q;
29481 size_t len;
29483 q = (const char *) strchr (name, '$');
29485 if (q == 0 || q == name)
29486 return name;
29488 len = strlen (name);
29489 strip = XALLOCAVEC (char, len + 1);
29490 strcpy (strip, name);
29491 p = strip + (q - name);
29492 while (p)
29494 *p = '_';
29495 p = strchr (p + 1, '$');
29498 return ggc_alloc_string (strip, len);
29500 #endif
29502 void
29503 rs6000_output_symbol_ref (FILE *file, rtx x)
29505 const char *name = XSTR (x, 0);
29507 /* Currently C++ toc references to vtables can be emitted before it
29508 is decided whether the vtable is public or private. If this is
29509 the case, then the linker will eventually complain that there is
29510 a reference to an unknown section. Thus, for vtables only,
29511 we emit the TOC reference to reference the identifier and not the
29512 symbol. */
29513 if (VTABLE_NAME_P (name))
29515 RS6000_OUTPUT_BASENAME (file, name);
29517 else
29518 assemble_name (file, name);
29521 /* Output a TOC entry. We derive the entry name from what is being
29522 written. */
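/* A sketch of typical output (assuming 64-bit AIX with a classic TOC):
   the double constant 1.0 is emitted as

	.tc FD_3ff00000_0[TC],0x3ff0000000000000

   whereas with -mminimal-toc the value is emitted after
   DOUBLE_INT_ASM_OP without a .tc name.  */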
29524 void
29525 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29527 char buf[256];
29528 const char *name = buf;
29529 rtx base = x;
29530 HOST_WIDE_INT offset = 0;
29532 gcc_assert (!TARGET_NO_TOC);
29534 /* When the linker won't eliminate them, don't output duplicate
29535 TOC entries (this happens on AIX if there is any kind of TOC,
29536 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29537 CODE_LABELs. */
29538 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29540 struct toc_hash_struct *h;
29542 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29543 time because GGC is not initialized at that point. */
29544 if (toc_hash_table == NULL)
29545 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29547 h = ggc_alloc<toc_hash_struct> ();
29548 h->key = x;
29549 h->key_mode = mode;
29550 h->labelno = labelno;
29552 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29553 if (*found == NULL)
29554 *found = h;
29555 else /* This is indeed a duplicate.
29556 Set this label equal to that label. */
29558 fputs ("\t.set ", file);
29559 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29560 fprintf (file, "%d,", labelno);
29561 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29562 fprintf (file, "%d\n", ((*found)->labelno));
29564 #ifdef HAVE_AS_TLS
29565 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29566 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29567 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29569 fputs ("\t.set ", file);
29570 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29571 fprintf (file, "%d,", labelno);
29572 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29573 fprintf (file, "%d\n", ((*found)->labelno));
29575 #endif
29576 return;
29580 /* If we're going to put a double constant in the TOC, make sure it's
29581 aligned properly when strict alignment is on. */
29582 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29583 && STRICT_ALIGNMENT
29584 && GET_MODE_BITSIZE (mode) >= 64
29585 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29586 ASM_OUTPUT_ALIGN (file, 3);
29589 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29591 /* Handle FP constants specially. Note that if we have a minimal
29592 TOC, things we put here aren't actually in the TOC, so we can allow
29593 FP constants. */
29594 if (GET_CODE (x) == CONST_DOUBLE
29595 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29596 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29598 long k[4];
29600 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29601 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29602 else
29603 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29605 if (TARGET_64BIT)
29607 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29608 fputs (DOUBLE_INT_ASM_OP, file);
29609 else
29610 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29611 k[0] & 0xffffffff, k[1] & 0xffffffff,
29612 k[2] & 0xffffffff, k[3] & 0xffffffff);
29613 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29614 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29615 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29616 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29617 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29618 return;
29620 else
29622 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29623 fputs ("\t.long ", file);
29624 else
29625 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29626 k[0] & 0xffffffff, k[1] & 0xffffffff,
29627 k[2] & 0xffffffff, k[3] & 0xffffffff);
29628 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29629 k[0] & 0xffffffff, k[1] & 0xffffffff,
29630 k[2] & 0xffffffff, k[3] & 0xffffffff);
29631 return;
29634 else if (GET_CODE (x) == CONST_DOUBLE
29635 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29637 long k[2];
29639 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29640 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29641 else
29642 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29644 if (TARGET_64BIT)
29646 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29647 fputs (DOUBLE_INT_ASM_OP, file);
29648 else
29649 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29650 k[0] & 0xffffffff, k[1] & 0xffffffff);
29651 fprintf (file, "0x%lx%08lx\n",
29652 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29653 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29654 return;
29656 else
29658 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29659 fputs ("\t.long ", file);
29660 else
29661 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29662 k[0] & 0xffffffff, k[1] & 0xffffffff);
29663 fprintf (file, "0x%lx,0x%lx\n",
29664 k[0] & 0xffffffff, k[1] & 0xffffffff);
29665 return;
29668 else if (GET_CODE (x) == CONST_DOUBLE
29669 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29671 long l;
29673 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29674 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29675 else
29676 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29678 if (TARGET_64BIT)
29680 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29681 fputs (DOUBLE_INT_ASM_OP, file);
29682 else
29683 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29684 if (WORDS_BIG_ENDIAN)
29685 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29686 else
29687 fprintf (file, "0x%lx\n", l & 0xffffffff);
29688 return;
29690 else
29692 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29693 fputs ("\t.long ", file);
29694 else
29695 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29696 fprintf (file, "0x%lx\n", l & 0xffffffff);
29697 return;
29700 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29702 unsigned HOST_WIDE_INT low;
29703 HOST_WIDE_INT high;
29705 low = INTVAL (x) & 0xffffffff;
29706 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29708 /* TOC entries are always Pmode-sized, so on big-endian targets
29709 smaller integer constants in the TOC need to be padded.
29710 (This is still a win over putting the constants in
29711 a separate constant pool, because then we'd have
29712 to have both a TOC entry _and_ the actual constant.)
29714 For a 32-bit target, CONST_INT values are loaded and shifted
29715 entirely within `low' and can be stored in one TOC entry. */
29717 /* It would be easy to make this work, but it doesn't now. */
29718 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29720 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29722 low |= high << 32;
29723 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29724 high = (HOST_WIDE_INT) low >> 32;
29725 low &= 0xffffffff;
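/* Worked example (illustrative): an SImode constant 0x1234 on a 64-bit
   big-endian target is shifted so that high == 0x1234 and low == 0,
   placing the 32-bit value in the most significant half of the
   Pmode-sized TOC entry.  */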
29728 if (TARGET_64BIT)
29730 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29731 fputs (DOUBLE_INT_ASM_OP, file);
29732 else
29733 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29734 (long) high & 0xffffffff, (long) low & 0xffffffff);
29735 fprintf (file, "0x%lx%08lx\n",
29736 (long) high & 0xffffffff, (long) low & 0xffffffff);
29737 return;
29739 else
29741 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29743 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29744 fputs ("\t.long ", file);
29745 else
29746 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29747 (long) high & 0xffffffff, (long) low & 0xffffffff);
29748 fprintf (file, "0x%lx,0x%lx\n",
29749 (long) high & 0xffffffff, (long) low & 0xffffffff);
29751 else
29753 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29754 fputs ("\t.long ", file);
29755 else
29756 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29757 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29759 return;
29763 if (GET_CODE (x) == CONST)
29765 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29766 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29768 base = XEXP (XEXP (x, 0), 0);
29769 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29772 switch (GET_CODE (base))
29774 case SYMBOL_REF:
29775 name = XSTR (base, 0);
29776 break;
29778 case LABEL_REF:
29779 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29780 CODE_LABEL_NUMBER (XEXP (base, 0)));
29781 break;
29783 case CODE_LABEL:
29784 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29785 break;
29787 default:
29788 gcc_unreachable ();
29791 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29792 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29793 else
29795 fputs ("\t.tc ", file);
29796 RS6000_OUTPUT_BASENAME (file, name);
29798 if (offset < 0)
29799 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29800 else if (offset)
29801 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29803 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29804 after other TOC symbols, reducing overflow of small TOC access
29805 to [TC] symbols. */
29806 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29807 ? "[TE]," : "[TC],", file);
29810 /* Currently C++ toc references to vtables can be emitted before it
29811 is decided whether the vtable is public or private. If this is
29812 the case, then the linker will eventually complain that there is
29813 a TOC reference to an unknown section. Thus, for vtables only,
29814 we emit the TOC reference to reference the symbol and not the
29815 section. */
29816 if (VTABLE_NAME_P (name))
29818 RS6000_OUTPUT_BASENAME (file, name);
29819 if (offset < 0)
29820 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29821 else if (offset > 0)
29822 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29824 else
29825 output_addr_const (file, x);
29827 #if HAVE_AS_TLS
29828 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29830 switch (SYMBOL_REF_TLS_MODEL (base))
29832 case 0:
29833 break;
29834 case TLS_MODEL_LOCAL_EXEC:
29835 fputs ("@le", file);
29836 break;
29837 case TLS_MODEL_INITIAL_EXEC:
29838 fputs ("@ie", file);
29839 break;
29840 /* Use global-dynamic for local-dynamic. */
29841 case TLS_MODEL_GLOBAL_DYNAMIC:
29842 case TLS_MODEL_LOCAL_DYNAMIC:
29843 putc ('\n', file);
29844 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29845 fputs ("\t.tc .", file);
29846 RS6000_OUTPUT_BASENAME (file, name);
29847 fputs ("[TC],", file);
29848 output_addr_const (file, x);
29849 fputs ("@m", file);
29850 break;
29851 default:
29852 gcc_unreachable ();
29855 #endif
29857 putc ('\n', file);
29860 /* Output an assembler pseudo-op to write an ASCII string of N characters
29861 starting at P to FILE.
29863 On the RS/6000, we have to do this using the .byte operation and
29864 write out special characters outside the quoted string.
29865 Also, the assembler is broken; very long strings are truncated,
29866 so we must artificially break them up early. */
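/* A small illustration (assumed input): the five bytes a b " c \n are
   emitted as

	.byte "ab""c"
	.byte 10

   quotes are doubled inside the quoted form, and unprintable bytes fall
   back to decimal .byte values.  */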
29868 void
29869 output_ascii (FILE *file, const char *p, int n)
29871 char c;
29872 int i, count_string;
29873 const char *for_string = "\t.byte \"";
29874 const char *for_decimal = "\t.byte ";
29875 const char *to_close = NULL;
29877 count_string = 0;
29878 for (i = 0; i < n; i++)
29880 c = *p++;
29881 if (c >= ' ' && c < 0177)
29883 if (for_string)
29884 fputs (for_string, file);
29885 putc (c, file);
29887 /* Write two quotes to get one. */
29888 if (c == '"')
29890 putc (c, file);
29891 ++count_string;
29894 for_string = NULL;
29895 for_decimal = "\"\n\t.byte ";
29896 to_close = "\"\n";
29897 ++count_string;
29899 if (count_string >= 512)
29901 fputs (to_close, file);
29903 for_string = "\t.byte \"";
29904 for_decimal = "\t.byte ";
29905 to_close = NULL;
29906 count_string = 0;
29909 else
29911 if (for_decimal)
29912 fputs (for_decimal, file);
29913 fprintf (file, "%d", c);
29915 for_string = "\n\t.byte \"";
29916 for_decimal = ", ";
29917 to_close = "\n";
29918 count_string = 0;
29922 /* Now close the string if we have written one. Then end the line. */
29923 if (to_close)
29924 fputs (to_close, file);
29927 /* Generate a unique section name for FILENAME for a section type
29928 represented by SECTION_DESC. Output goes into BUF.
29930 SECTION_DESC can be any string, as long as it is different for each
29931 possible section type.
29933 We name the section in the same manner as xlc. The name begins with an
29934 underscore followed by the filename (after stripping any leading directory
29935 names) with the last period replaced by the string SECTION_DESC. If
29936 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29937 the name. */
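/* For example (hypothetical arguments): FILENAME "src/foo.c" with
   SECTION_DESC "rodata_" produces "_foorodata_"; everything from the
   last period on is replaced, and non-alphanumeric characters before
   it are dropped.  */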
29939 void
29940 rs6000_gen_section_name (char **buf, const char *filename,
29941 const char *section_desc)
29943 const char *q, *after_last_slash, *last_period = 0;
29944 char *p;
29945 int len;
29947 after_last_slash = filename;
29948 for (q = filename; *q; q++)
29950 if (*q == '/')
29951 after_last_slash = q + 1;
29952 else if (*q == '.')
29953 last_period = q;
29956 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29957 *buf = (char *) xmalloc (len);
29959 p = *buf;
29960 *p++ = '_';
29962 for (q = after_last_slash; *q; q++)
29964 if (q == last_period)
29966 strcpy (p, section_desc);
29967 p += strlen (section_desc);
29968 break;
29971 else if (ISALNUM (*q))
29972 *p++ = *q;
29975 if (last_period == 0)
29976 strcpy (p, section_desc);
29977 else
29978 *p = '\0';
29981 /* Emit profile function. */
29983 void
29984 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29986 /* Non-standard profiling for kernels, which just saves LR then calls
29987 _mcount without worrying about arg saves. The idea is to change
29988 the function prologue as little as possible as it isn't easy to
29989 account for arg save/restore code added just for _mcount. */
29990 if (TARGET_PROFILE_KERNEL)
29991 return;
29993 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29995 #ifndef NO_PROFILE_COUNTERS
29996 # define NO_PROFILE_COUNTERS 0
29997 #endif
29998 if (NO_PROFILE_COUNTERS)
29999 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30000 LCT_NORMAL, VOIDmode);
30001 else
30003 char buf[30];
30004 const char *label_name;
30005 rtx fun;
30007 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30008 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30009 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30011 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30012 LCT_NORMAL, VOIDmode, fun, Pmode);
30015 else if (DEFAULT_ABI == ABI_DARWIN)
30017 const char *mcount_name = RS6000_MCOUNT;
30018 int caller_addr_regno = LR_REGNO;
30020 /* Be conservative and always set this, at least for now. */
30021 crtl->uses_pic_offset_table = 1;
30023 #if TARGET_MACHO
30024 /* For PIC code, set up a stub and collect the caller's address
30025 from r0, which is where the prologue puts it. */
30026 if (MACHOPIC_INDIRECT
30027 && crtl->uses_pic_offset_table)
30028 caller_addr_regno = 0;
30029 #endif
30030 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30031 LCT_NORMAL, VOIDmode,
30032 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30036 /* Write function profiler code. */
30038 void
30039 output_function_profiler (FILE *file, int labelno)
30041 char buf[100];
30043 switch (DEFAULT_ABI)
30045 default:
30046 gcc_unreachable ();
30048 case ABI_V4:
30049 if (!TARGET_32BIT)
30051 warning (0, "no profiling of 64-bit code for this ABI");
30052 return;
30054 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30055 fprintf (file, "\tmflr %s\n", reg_names[0]);
30056 if (NO_PROFILE_COUNTERS)
30058 asm_fprintf (file, "\tstw %s,4(%s)\n",
30059 reg_names[0], reg_names[1]);
30061 else if (TARGET_SECURE_PLT && flag_pic)
30063 if (TARGET_LINK_STACK)
30065 char name[32];
30066 get_ppc476_thunk_name (name);
30067 asm_fprintf (file, "\tbl %s\n", name);
30069 else
30070 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30071 asm_fprintf (file, "\tstw %s,4(%s)\n",
30072 reg_names[0], reg_names[1]);
30073 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30074 asm_fprintf (file, "\taddis %s,%s,",
30075 reg_names[12], reg_names[12]);
30076 assemble_name (file, buf);
30077 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30078 assemble_name (file, buf);
30079 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30081 else if (flag_pic == 1)
30083 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30084 asm_fprintf (file, "\tstw %s,4(%s)\n",
30085 reg_names[0], reg_names[1]);
30086 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30087 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30088 assemble_name (file, buf);
30089 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30091 else if (flag_pic > 1)
30093 asm_fprintf (file, "\tstw %s,4(%s)\n",
30094 reg_names[0], reg_names[1]);
30095 /* Now, we need to get the address of the label. */
30096 if (TARGET_LINK_STACK)
30098 char name[32];
30099 get_ppc476_thunk_name (name);
30100 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30101 assemble_name (file, buf);
30102 fputs ("-.\n1:", file);
30103 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30104 asm_fprintf (file, "\taddi %s,%s,4\n",
30105 reg_names[11], reg_names[11]);
30107 else
30109 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30110 assemble_name (file, buf);
30111 fputs ("-.\n1:", file);
30112 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30114 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30115 reg_names[0], reg_names[11]);
30116 asm_fprintf (file, "\tadd %s,%s,%s\n",
30117 reg_names[0], reg_names[0], reg_names[11]);
30119 else
30121 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30122 assemble_name (file, buf);
30123 fputs ("@ha\n", file);
30124 asm_fprintf (file, "\tstw %s,4(%s)\n",
30125 reg_names[0], reg_names[1]);
30126 asm_fprintf (file, "\tla %s,", reg_names[0]);
30127 assemble_name (file, buf);
30128 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30131 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30132 fprintf (file, "\tbl %s%s\n",
30133 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30134 break;
30136 case ABI_AIX:
30137 case ABI_ELFv2:
30138 case ABI_DARWIN:
30139 /* Don't do anything, done in output_profile_hook (). */
30140 break;
30146 /* The following variable value is the last issued insn. */
30148 static rtx_insn *last_scheduled_insn;
30150 /* The following variable helps to balance the issuing of load and
30151 store instructions. */
30153 static int load_store_pendulum;
30155 /* The following variable helps pair divide insns during scheduling. */
30156 static int divide_cnt;
30157 /* The following variable helps pair and alternate vector and vector load
30158 insns during scheduling. */
30159 static int vec_pairing;
30162 /* Power4 load update and store update instructions are cracked into a
30163 load or store and an integer insn which are executed in the same cycle.
30164 Branches have their own dispatch slot which does not count against the
30165 GCC issue rate, but it changes the program flow so there are no other
30166 instructions to issue in this cycle. */
30168 static int
30169 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30171 last_scheduled_insn = insn;
30172 if (GET_CODE (PATTERN (insn)) == USE
30173 || GET_CODE (PATTERN (insn)) == CLOBBER)
30175 cached_can_issue_more = more;
30176 return cached_can_issue_more;
30179 if (insn_terminates_group_p (insn, current_group))
30181 cached_can_issue_more = 0;
30182 return cached_can_issue_more;
30185 /* If the insn has no reservation but we still reach here, return MORE unchanged. */
30186 if (recog_memoized (insn) < 0)
30187 return more;
30189 if (rs6000_sched_groups)
30191 if (is_microcoded_insn (insn))
30192 cached_can_issue_more = 0;
30193 else if (is_cracked_insn (insn))
30194 cached_can_issue_more = more > 2 ? more - 2 : 0;
30195 else
30196 cached_can_issue_more = more - 1;
30198 return cached_can_issue_more;
30201 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
30202 return 0;
30204 cached_can_issue_more = more - 1;
30205 return cached_can_issue_more;
30208 static int
30209 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30211 int r = rs6000_variable_issue_1 (insn, more);
30212 if (verbose)
30213 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30214 return r;
30217 /* Adjust the cost of a scheduling dependency. Return the new cost of
30218 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30220 static int
30221 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30222 unsigned int)
30224 enum attr_type attr_type;
30226 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30227 return cost;
30229 switch (dep_type)
30231 case REG_DEP_TRUE:
30233 /* Data dependency; DEP_INSN writes a register that INSN reads
30234 some cycles later. */
30236 /* Separate a load from a narrower, dependent store. */
30237 if ((rs6000_sched_groups || rs6000_cpu_attr == CPU_POWER9)
30238 && GET_CODE (PATTERN (insn)) == SET
30239 && GET_CODE (PATTERN (dep_insn)) == SET
30240 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30241 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30242 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30243 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30244 return cost + 14;
30246 attr_type = get_attr_type (insn);
30248 switch (attr_type)
30250 case TYPE_JMPREG:
30251 /* Tell the first scheduling pass about the latency between
30252 a mtctr and bctr (and mtlr and br/blr). The first
30253 scheduling pass will not know about this latency since
30254 the mtctr instruction, which has the latency associated
30255 to it, will be generated by reload. */
30256 return 4;
30257 case TYPE_BRANCH:
30258 /* Leave some extra cycles between a compare and its
30259 dependent branch, to inhibit expensive mispredicts. */
30260 if ((rs6000_cpu_attr == CPU_PPC603
30261 || rs6000_cpu_attr == CPU_PPC604
30262 || rs6000_cpu_attr == CPU_PPC604E
30263 || rs6000_cpu_attr == CPU_PPC620
30264 || rs6000_cpu_attr == CPU_PPC630
30265 || rs6000_cpu_attr == CPU_PPC750
30266 || rs6000_cpu_attr == CPU_PPC7400
30267 || rs6000_cpu_attr == CPU_PPC7450
30268 || rs6000_cpu_attr == CPU_PPCE5500
30269 || rs6000_cpu_attr == CPU_PPCE6500
30270 || rs6000_cpu_attr == CPU_POWER4
30271 || rs6000_cpu_attr == CPU_POWER5
30272 || rs6000_cpu_attr == CPU_POWER7
30273 || rs6000_cpu_attr == CPU_POWER8
30274 || rs6000_cpu_attr == CPU_POWER9
30275 || rs6000_cpu_attr == CPU_CELL)
30276 && recog_memoized (dep_insn)
30277 && (INSN_CODE (dep_insn) >= 0))
30279 switch (get_attr_type (dep_insn))
30281 case TYPE_CMP:
30282 case TYPE_FPCOMPARE:
30283 case TYPE_CR_LOGICAL:
30284 case TYPE_DELAYED_CR:
30285 return cost + 2;
30286 case TYPE_EXTS:
30287 case TYPE_MUL:
30288 if (get_attr_dot (dep_insn) == DOT_YES)
30289 return cost + 2;
30290 else
30291 break;
30292 case TYPE_SHIFT:
30293 if (get_attr_dot (dep_insn) == DOT_YES
30294 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30295 return cost + 2;
30296 else
30297 break;
30298 default:
30299 break;
30301 break;
30303 case TYPE_STORE:
30304 case TYPE_FPSTORE:
30305 if ((rs6000_cpu == PROCESSOR_POWER6)
30306 && recog_memoized (dep_insn)
30307 && (INSN_CODE (dep_insn) >= 0))
30310 if (GET_CODE (PATTERN (insn)) != SET)
30311 /* If this happens, we have to extend this to schedule
30312 optimally. Return default for now. */
30313 return cost;
30315 /* Adjust the cost for the case where the value written
30316 by a fixed point operation is used as the address
30317 gen value on a store. */
30318 switch (get_attr_type (dep_insn))
30320 case TYPE_LOAD:
30321 case TYPE_CNTLZ:
30323 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30324 return get_attr_sign_extend (dep_insn)
30325 == SIGN_EXTEND_YES ? 6 : 4;
30326 break;
30328 case TYPE_SHIFT:
30330 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30331 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30332 6 : 3;
30333 break;
30335 case TYPE_INTEGER:
30336 case TYPE_ADD:
30337 case TYPE_LOGICAL:
30338 case TYPE_EXTS:
30339 case TYPE_INSERT:
30341 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30342 return 3;
30343 break;
30345 case TYPE_STORE:
30346 case TYPE_FPLOAD:
30347 case TYPE_FPSTORE:
30349 if (get_attr_update (dep_insn) == UPDATE_YES
30350 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30351 return 3;
30352 break;
30354 case TYPE_MUL:
30356 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30357 return 17;
30358 break;
30360 case TYPE_DIV:
30362 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30363 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30364 break;
30366 default:
30367 break;
30370 break;
30372 case TYPE_LOAD:
30373 if ((rs6000_cpu == PROCESSOR_POWER6)
30374 && recog_memoized (dep_insn)
30375 && (INSN_CODE (dep_insn) >= 0))
30378 /* Adjust the cost for the case where the value written
30379 by a fixed point instruction is used within the address
30380 gen portion of a subsequent load(u)(x). */
30381 switch (get_attr_type (dep_insn))
30383 case TYPE_LOAD:
30384 case TYPE_CNTLZ:
30386 if (set_to_load_agen (dep_insn, insn))
30387 return get_attr_sign_extend (dep_insn)
30388 == SIGN_EXTEND_YES ? 6 : 4;
30389 break;
30391 case TYPE_SHIFT:
30393 if (set_to_load_agen (dep_insn, insn))
30394 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30395 6 : 3;
30396 break;
30398 case TYPE_INTEGER:
30399 case TYPE_ADD:
30400 case TYPE_LOGICAL:
30401 case TYPE_EXTS:
30402 case TYPE_INSERT:
30404 if (set_to_load_agen (dep_insn, insn))
30405 return 3;
30406 break;
30408 case TYPE_STORE:
30409 case TYPE_FPLOAD:
30410 case TYPE_FPSTORE:
30412 if (get_attr_update (dep_insn) == UPDATE_YES
30413 && set_to_load_agen (dep_insn, insn))
30414 return 3;
30415 break;
30417 case TYPE_MUL:
30419 if (set_to_load_agen (dep_insn, insn))
30420 return 17;
30421 break;
30423 case TYPE_DIV:
30425 if (set_to_load_agen (dep_insn, insn))
30426 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30427 break;
30429 default:
30430 break;
30433 break;
30435 case TYPE_FPLOAD:
30436 if ((rs6000_cpu == PROCESSOR_POWER6)
30437 && get_attr_update (insn) == UPDATE_NO
30438 && recog_memoized (dep_insn)
30439 && (INSN_CODE (dep_insn) >= 0)
30440 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30441 return 2;
30443 default:
30444 break;
30447 /* Fall out to return default cost. */
30449 break;
30451 case REG_DEP_OUTPUT:
30452 /* Output dependency; DEP_INSN writes a register that INSN writes some
30453 cycles later. */
30454 if ((rs6000_cpu == PROCESSOR_POWER6)
30455 && recog_memoized (dep_insn)
30456 && (INSN_CODE (dep_insn) >= 0))
30458 attr_type = get_attr_type (insn);
30460 switch (attr_type)
30462 case TYPE_FP:
30463 case TYPE_FPSIMPLE:
30464 if (get_attr_type (dep_insn) == TYPE_FP
30465 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30466 return 1;
30467 break;
30468 case TYPE_FPLOAD:
30469 if (get_attr_update (insn) == UPDATE_NO
30470 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30471 return 2;
30472 break;
30473 default:
30474 break;
30477 /* Fall through, no cost for output dependency. */
30478 /* FALLTHRU */
30480 case REG_DEP_ANTI:
30481 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30482 cycles later. */
30483 return 0;
30485 default:
30486 gcc_unreachable ();
30489 return cost;
30492 /* Debug version of rs6000_adjust_cost. */
30494 static int
30495 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30496 int cost, unsigned int dw)
30498 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30500 if (ret != cost)
30502 const char *dep;
30504 switch (dep_type)
30506 default: dep = "unknown dependency"; break;
30507 case REG_DEP_TRUE: dep = "data dependency"; break;
30508 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30509 case REG_DEP_ANTI: dep = "anti dependency"; break;
30512 fprintf (stderr,
30513 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30514 "%s, insn:\n", ret, cost, dep);
30516 debug_rtx (insn);
30519 return ret;
30522 /* Return true if INSN is microcoded, false otherwise. */
30525 static bool
30526 is_microcoded_insn (rtx_insn *insn)
30528 if (!insn || !NONDEBUG_INSN_P (insn)
30529 || GET_CODE (PATTERN (insn)) == USE
30530 || GET_CODE (PATTERN (insn)) == CLOBBER)
30531 return false;
30533 if (rs6000_cpu_attr == CPU_CELL)
30534 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30536 if (rs6000_sched_groups
30537 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30539 enum attr_type type = get_attr_type (insn);
30540 if ((type == TYPE_LOAD
30541 && get_attr_update (insn) == UPDATE_YES
30542 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30543 || ((type == TYPE_LOAD || type == TYPE_STORE)
30544 && get_attr_update (insn) == UPDATE_YES
30545 && get_attr_indexed (insn) == INDEXED_YES)
30546 || type == TYPE_MFCR)
30547 return true;
30550 return false;
30553 /* The function returns true if INSN is cracked into 2 instructions
30554 by the processor (and therefore occupies 2 issue slots). */
30556 static bool
30557 is_cracked_insn (rtx_insn *insn)
30559 if (!insn || !NONDEBUG_INSN_P (insn)
30560 || GET_CODE (PATTERN (insn)) == USE
30561 || GET_CODE (PATTERN (insn)) == CLOBBER)
30562 return false;
30564 if (rs6000_sched_groups
30565 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
30567 enum attr_type type = get_attr_type (insn);
30568 if ((type == TYPE_LOAD
30569 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30570 && get_attr_update (insn) == UPDATE_NO)
30571 || (type == TYPE_LOAD
30572 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30573 && get_attr_update (insn) == UPDATE_YES
30574 && get_attr_indexed (insn) == INDEXED_NO)
30575 || (type == TYPE_STORE
30576 && get_attr_update (insn) == UPDATE_YES
30577 && get_attr_indexed (insn) == INDEXED_NO)
30578 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30579 && get_attr_update (insn) == UPDATE_YES)
30580 || type == TYPE_DELAYED_CR
30581 || (type == TYPE_EXTS
30582 && get_attr_dot (insn) == DOT_YES)
30583 || (type == TYPE_SHIFT
30584 && get_attr_dot (insn) == DOT_YES
30585 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30586 || (type == TYPE_MUL
30587 && get_attr_dot (insn) == DOT_YES)
30588 || type == TYPE_DIV
30589 || (type == TYPE_INSERT
30590 && get_attr_size (insn) == SIZE_32))
30591 return true;
30594 return false;
30597 /* The function returns true if INSN can be issued only from
30598 the branch slot. */
30600 static bool
30601 is_branch_slot_insn (rtx_insn *insn)
30603 if (!insn || !NONDEBUG_INSN_P (insn)
30604 || GET_CODE (PATTERN (insn)) == USE
30605 || GET_CODE (PATTERN (insn)) == CLOBBER)
30606 return false;
30608 if (rs6000_sched_groups)
30610 enum attr_type type = get_attr_type (insn);
30611 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30612 return true;
30613 return false;
30616 return false;
30619 /* Return true if OUT_INSN sets a value that is used in the
30620 address generation computation of IN_INSN. */
30621 static bool
30622 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30624 rtx out_set, in_set;
30626 /* For performance reasons, only handle the simple case where
30627 both insns are a single_set. */
30628 out_set = single_set (out_insn);
30629 if (out_set)
30631 in_set = single_set (in_insn);
30632 if (in_set)
30633 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30636 return false;
30639 /* Try to determine base/offset/size parts of the given MEM.
30640 Return true if successful, false if the values couldn't all
30641 be determined.
30643 This function only looks for REG or REG+CONST address forms.
30644 REG+REG address form will return false. */
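/* For instance (a hypothetical SImode access):
   (mem:SI (plus:DI (reg:DI 9) (const_int 16))) yields *base == r9,
   *offset == 16 and *size == 4; a (plus (reg) (reg)) address fails
   the final REG_P test and returns false.  */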
30646 static bool
30647 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30648 HOST_WIDE_INT *size)
30650 rtx addr_rtx;
30651 if (MEM_SIZE_KNOWN_P (mem))
30652 *size = MEM_SIZE (mem);
30653 else
30654 return false;
30656 addr_rtx = (XEXP (mem, 0));
30657 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30658 addr_rtx = XEXP (addr_rtx, 1);
30660 *offset = 0;
30661 while (GET_CODE (addr_rtx) == PLUS
30662 && CONST_INT_P (XEXP (addr_rtx, 1)))
30664 *offset += INTVAL (XEXP (addr_rtx, 1));
30665 addr_rtx = XEXP (addr_rtx, 0);
30667 if (!REG_P (addr_rtx))
30668 return false;
30670 *base = addr_rtx;
30671 return true;
30674 /* Return true if the target storage location of MEM1 is adjacent
30675 to the target storage location of MEM2. */
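/* E.g. (illustrative): with a common base register, two 8-byte accesses
   at offsets 32 and 40 are adjacent in either order, while offsets 32
   and 48 are not.  */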
30678 static bool
30679 adjacent_mem_locations (rtx mem1, rtx mem2)
30681 rtx reg1, reg2;
30682 HOST_WIDE_INT off1, size1, off2, size2;
30684 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30685 && get_memref_parts (mem2, &reg2, &off2, &size2))
30686 return ((REGNO (reg1) == REGNO (reg2))
30687 && ((off1 + size1 == off2)
30688 || (off2 + size2 == off1)));
30690 return false;
30693 /* This function returns true if it can be determined that the two MEM
30694 locations overlap by at least 1 byte based on base reg/offset/size. */
30696 static bool
30697 mem_locations_overlap (rtx mem1, rtx mem2)
30699 rtx reg1, reg2;
30700 HOST_WIDE_INT off1, size1, off2, size2;
30702 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30703 && get_memref_parts (mem2, &reg2, &off2, &size2))
30704 return ((REGNO (reg1) == REGNO (reg2))
30705 && (((off1 <= off2) && (off1 + size1 > off2))
30706 || ((off2 <= off1) && (off2 + size2 > off1))));
30708 return false;
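/* Illustration: with a common base register, bytes [32,40) and [36,44)
   overlap, while [32,40) and [40,48) merely touch and do not.  */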
30711 /* A C statement (sans semicolon) to update the integer scheduling
30712 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30713 INSN earlier, reduce the priority to execute INSN later. Do not
30714 define this macro if you do not need to adjust the scheduling
30715 priorities of insns. */
30717 static int
30718 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30720 rtx load_mem, str_mem;
30721 /* On machines (like the 750) which have asymmetric integer units,
30722 where one integer unit can do multiplies and divides and the other
30723 can't, reduce the priority of multiply/divide so it is scheduled
30724 before other integer operations. */
30726 #if 0
30727 if (! INSN_P (insn))
30728 return priority;
30730 if (GET_CODE (PATTERN (insn)) == USE)
30731 return priority;
30733 switch (rs6000_cpu_attr) {
30734 case CPU_PPC750:
30735 switch (get_attr_type (insn))
30737 default:
30738 break;
30740 case TYPE_MUL:
30741 case TYPE_DIV:
30742 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30743 priority, priority);
30744 if (priority >= 0 && priority < 0x01000000)
30745 priority >>= 3;
30746 break;
30749 #endif
30751 if (insn_must_be_first_in_group (insn)
30752 && reload_completed
30753 && current_sched_info->sched_max_insns_priority
30754 && rs6000_sched_restricted_insns_priority)
30757 /* Prioritize insns that can be dispatched only in the first
30758 dispatch slot. */
30759 if (rs6000_sched_restricted_insns_priority == 1)
30760 /* Attach highest priority to insn. This means that in
30761 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30762 precede 'priority' (critical path) considerations. */
30763 return current_sched_info->sched_max_insns_priority;
30764 else if (rs6000_sched_restricted_insns_priority == 2)
30765 /* Increase priority of insn by a minimal amount. This means that in
30766 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30767 considerations precede dispatch-slot restriction considerations. */
30768 return (priority + 1);
30771 if (rs6000_cpu == PROCESSOR_POWER6
30772 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30773 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30774 /* Attach highest priority to insn if the scheduler has just issued two
30775 stores and this instruction is a load, or two loads and this instruction
30776 is a store. Power6 wants loads and stores scheduled alternately
30777 when possible. */
30778 return current_sched_info->sched_max_insns_priority;
30780 return priority;
30783 /* Return true if the instruction is nonpipelined on the Cell. */
30784 static bool
30785 is_nonpipeline_insn (rtx_insn *insn)
30787 enum attr_type type;
30788 if (!insn || !NONDEBUG_INSN_P (insn)
30789 || GET_CODE (PATTERN (insn)) == USE
30790 || GET_CODE (PATTERN (insn)) == CLOBBER)
30791 return false;
30793 type = get_attr_type (insn);
30794 if (type == TYPE_MUL
30795 || type == TYPE_DIV
30796 || type == TYPE_SDIV
30797 || type == TYPE_DDIV
30798 || type == TYPE_SSQRT
30799 || type == TYPE_DSQRT
30800 || type == TYPE_MFCR
30801 || type == TYPE_MFCRF
30802 || type == TYPE_MFJMPR)
30804 return true;
30806 return false;
30810 /* Return how many instructions the machine can issue per cycle. */
30812 static int
30813 rs6000_issue_rate (void)
30815 /* Unless scheduling for register pressure, use issue rate of 1 for
30816 first scheduling pass to decrease degradation. */
30817 if (!reload_completed && !flag_sched_pressure)
30818 return 1;
30820 switch (rs6000_cpu_attr) {
30821 case CPU_RS64A:
30822 case CPU_PPC601: /* ? */
30823 case CPU_PPC7450:
30824 return 3;
30825 case CPU_PPC440:
30826 case CPU_PPC603:
30827 case CPU_PPC750:
30828 case CPU_PPC7400:
30829 case CPU_PPC8540:
30830 case CPU_PPC8548:
30831 case CPU_CELL:
30832 case CPU_PPCE300C2:
30833 case CPU_PPCE300C3:
30834 case CPU_PPCE500MC:
30835 case CPU_PPCE500MC64:
30836 case CPU_PPCE5500:
30837 case CPU_PPCE6500:
30838 case CPU_TITAN:
30839 return 2;
30840 case CPU_PPC476:
30841 case CPU_PPC604:
30842 case CPU_PPC604E:
30843 case CPU_PPC620:
30844 case CPU_PPC630:
30845 return 4;
30846 case CPU_POWER4:
30847 case CPU_POWER5:
30848 case CPU_POWER6:
30849 case CPU_POWER7:
30850 return 5;
30851 case CPU_POWER8:
30852 return 7;
30853 case CPU_POWER9:
30854 return 6;
30855 default:
30856 return 1;
30860 /* Return how many instructions to look ahead for better insn
30861 scheduling. */
30863 static int
30864 rs6000_use_sched_lookahead (void)
30866 switch (rs6000_cpu_attr)
30868 case CPU_PPC8540:
30869 case CPU_PPC8548:
30870 return 4;
30872 case CPU_CELL:
30873 return (reload_completed ? 8 : 0);
30875 default:
30876 return 0;
30880 /* We are choosing an insn from the ready queue. Return zero if INSN
30881 can be chosen. */
30882 static int
30883 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30885 if (ready_index == 0)
30886 return 0;
30888 if (rs6000_cpu_attr != CPU_CELL)
30889 return 0;
30891 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30893 if (!reload_completed
30894 || is_nonpipeline_insn (insn)
30895 || is_microcoded_insn (insn))
30896 return 1;
30898 return 0;
30901 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30902 and return true. */
30904 static bool
30905 find_mem_ref (rtx pat, rtx *mem_ref)
30907 const char * fmt;
30908 int i, j;
30910 /* stack_tie does not produce any real memory traffic. */
30911 if (tie_operand (pat, VOIDmode))
30912 return false;
30914 if (GET_CODE (pat) == MEM)
30916 *mem_ref = pat;
30917 return true;
30920 /* Recursively process the pattern. */
30921 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30923 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30925 if (fmt[i] == 'e')
30927 if (find_mem_ref (XEXP (pat, i), mem_ref))
30928 return true;
30930 else if (fmt[i] == 'E')
30931 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30933 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30934 return true;
30938 return false;
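/* Illustrative sketch, not part of GCC: find_mem_ref above walks an rtx
   using its format string, where 'e' marks a sub-expression operand and
   'E' marks a vector of sub-expressions.  The toy mirror below shows the
   same traversal shape on an invented node type; toy_node and
   toy_find_mem exist only for illustration.  */
#if 0
#include <stdbool.h>

struct toy_node
{
  const char *fmt;          /* one char per operand, e.g. "ee" */
  bool is_mem;              /* stands in for GET_CODE (x) == MEM */
  struct toy_node *ops[4];  /* operands for 'e' slots */
  struct toy_node **vec;    /* children for an 'E' slot */
  int vec_len;
};

/* Depth-first search for the first MEM-like node.  */
static bool
toy_find_mem (struct toy_node *n, struct toy_node **mem_ref)
{
  int i, j;

  if (n->is_mem)
    {
      *mem_ref = n;
      return true;
    }

  for (i = 0; n->fmt[i]; i++)
    if (n->fmt[i] == 'e')
      {
        if (toy_find_mem (n->ops[i], mem_ref))
          return true;
      }
    else if (n->fmt[i] == 'E')
      {
        for (j = 0; j < n->vec_len; j++)
          if (toy_find_mem (n->vec[j], mem_ref))
            return true;
      }

  return false;
}

int
main (void)
{
  struct toy_node mem = { "", true, { 0 }, 0, 0 };
  struct toy_node set = { "ee", false, { &mem, &mem }, 0, 0 };
  struct toy_node *found;
  return toy_find_mem (&set, &found) ? 0 : 1;  /* exits 0: MEM found */
}
#endif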
30941 /* Determine if PAT is a PATTERN of a load insn. */
30943 static bool
30944 is_load_insn1 (rtx pat, rtx *load_mem)
30946 if (pat == NULL_RTX)
30947 return false;
30949 if (GET_CODE (pat) == SET)
30950 return find_mem_ref (SET_SRC (pat), load_mem);
30952 if (GET_CODE (pat) == PARALLEL)
30954 int i;
30956 for (i = 0; i < XVECLEN (pat, 0); i++)
30957 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30958 return true;
30961 return false;
30964 /* Determine if INSN loads from memory. */
30966 static bool
30967 is_load_insn (rtx insn, rtx *load_mem)
30969 if (!insn || !INSN_P (insn))
30970 return false;
30972 if (CALL_P (insn))
30973 return false;
30975 return is_load_insn1 (PATTERN (insn), load_mem);
30978 /* Determine if PAT is a PATTERN of a store insn. */
30980 static bool
30981 is_store_insn1 (rtx pat, rtx *str_mem)
30983 if (pat == NULL_RTX)
30984 return false;
30986 if (GET_CODE (pat) == SET)
30987 return find_mem_ref (SET_DEST (pat), str_mem);
30989 if (GET_CODE (pat) == PARALLEL)
30991 int i;
30993 for (i = 0; i < XVECLEN (pat, 0); i++)
30994 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30995 return true;
30998 return false;
31001 /* Determine if INSN stores to memory. */
31003 static bool
31004 is_store_insn (rtx insn, rtx *str_mem)
31006 if (!insn || !INSN_P (insn))
31007 return false;
31009 return is_store_insn1 (PATTERN (insn), str_mem);
31012 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31014 static bool
31015 is_power9_pairable_vec_type (enum attr_type type)
31017 switch (type)
31019 case TYPE_VECSIMPLE:
31020 case TYPE_VECCOMPLEX:
31021 case TYPE_VECDIV:
31022 case TYPE_VECCMP:
31023 case TYPE_VECPERM:
31024 case TYPE_VECFLOAT:
31025 case TYPE_VECFDIV:
31026 case TYPE_VECDOUBLE:
31027 return true;
31028 default:
31029 break;
31031 return false;
31034 /* Returns whether the dependence between INSN and NEXT is considered
31035 costly by the given target. */
31037 static bool
31038 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31040 rtx insn;
31041 rtx next;
31042 rtx load_mem, str_mem;
31044 /* If the flag is not enabled - no dependence is considered costly;
31045 allow all dependent insns in the same group.
31046 This is the most aggressive option. */
31047 if (rs6000_sched_costly_dep == no_dep_costly)
31048 return false;
31050 /* If the flag is set to 1 - a dependence is always considered costly;
31051 do not allow dependent instructions in the same group.
31052 This is the most conservative option. */
31053 if (rs6000_sched_costly_dep == all_deps_costly)
31054 return true;
31056 insn = DEP_PRO (dep);
31057 next = DEP_CON (dep);
31059 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31060 && is_load_insn (next, &load_mem)
31061 && is_store_insn (insn, &str_mem))
31062 /* Prevent load after store in the same group. */
31063 return true;
31065 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31066 && is_load_insn (next, &load_mem)
31067 && is_store_insn (insn, &str_mem)
31068 && DEP_TYPE (dep) == REG_DEP_TRUE
31069 && mem_locations_overlap (str_mem, load_mem))
31070 /* Prevent load after store in the same group if it is a true
31071 dependence. */
31072 return true;
31074 /* The flag is set to X; dependences with latency >= X are considered costly,
31075 and will not be scheduled in the same group. */
31076 if (rs6000_sched_costly_dep <= max_dep_latency
31077 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31078 return true;
31080 return false;
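/* A minimal sketch, not GCC code, of the latency-threshold case above:
   when rs6000_sched_costly_dep is a number N (the -msched-costly-dep=N
   form), a dependence is costly once its remaining latency, cost minus
   distance, reaches N.  The helper name is invented for illustration.  */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool
costly_by_latency (int cost, int distance, int threshold)
{
  return (cost - distance) >= threshold;
}

int
main (void)
{
  /* A 4-cycle dependence with one insn already between the pair is
     costly under a threshold of 3 but not under a threshold of 4.  */
  printf ("%d\n", costly_by_latency (4, 1, 3));  /* prints 1 */
  printf ("%d\n", costly_by_latency (4, 1, 4));  /* prints 0 */
  return 0;
}
#endif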
31083 /* Return the next insn after INSN that is found before TAIL is reached,
31084 skipping any "non-active" insns - insns that will not actually occupy
31085 an issue slot. Return NULL_RTX if such an insn is not found. */
31087 static rtx_insn *
31088 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31090 if (insn == NULL_RTX || insn == tail)
31091 return NULL;
31093 while (1)
31095 insn = NEXT_INSN (insn);
31096 if (insn == NULL_RTX || insn == tail)
31097 return NULL;
31099 if (CALL_P (insn)
31100 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31101 || (NONJUMP_INSN_P (insn)
31102 && GET_CODE (PATTERN (insn)) != USE
31103 && GET_CODE (PATTERN (insn)) != CLOBBER
31104 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31105 break;
31107 return insn;
31110 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31112 static int
31113 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31115 int pos;
31116 int i;
31117 rtx_insn *tmp;
31118 enum attr_type type, type2;
31120 type = get_attr_type (last_scheduled_insn);
31122 /* Try to issue fixed point divides back-to-back in pairs so they will be
31123 routed to separate execution units and execute in parallel. */
31124 if (type == TYPE_DIV && divide_cnt == 0)
31126 /* First divide has been scheduled. */
31127 divide_cnt = 1;
31129 /* Scan the ready list looking for another divide, if found move it
31130 to the end of the list so it is chosen next. */
31131 pos = lastpos;
31132 while (pos >= 0)
31134 if (recog_memoized (ready[pos]) >= 0
31135 && get_attr_type (ready[pos]) == TYPE_DIV)
31137 tmp = ready[pos];
31138 for (i = pos; i < lastpos; i++)
31139 ready[i] = ready[i + 1];
31140 ready[lastpos] = tmp;
31141 break;
31143 pos--;
31146 else
31148 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31149 divide_cnt = 0;
31151 /* The best dispatch throughput for vector and vector load insns can be
31152 achieved by interleaving a vector and vector load such that they'll
31153 dispatch to the same superslice. If this pairing cannot be achieved
31154 then it is best to pair vector insns together and vector load insns
31155 together.
31157 To aid in this pairing, vec_pairing maintains the current state with
31158 the following values:
31160 0 : Initial state, no vecload/vector pairing has been started.
31162 1 : A vecload or vector insn has been issued and a candidate for
31163 pairing has been found and moved to the end of the ready
31164 list. */
31165 if (type == TYPE_VECLOAD)
31167 /* Issued a vecload. */
31168 if (vec_pairing == 0)
31170 int vecload_pos = -1;
31171 /* We issued a single vecload, look for a vector insn to pair it
31172 with. If one isn't found, try to pair another vecload. */
31173 pos = lastpos;
31174 while (pos >= 0)
31176 if (recog_memoized (ready[pos]) >= 0)
31178 type2 = get_attr_type (ready[pos]);
31179 if (is_power9_pairable_vec_type (type2))
31181 /* Found a vector insn to pair with, move it to the
31182 end of the ready list so it is scheduled next. */
31183 tmp = ready[pos];
31184 for (i = pos; i < lastpos; i++)
31185 ready[i] = ready[i + 1];
31186 ready[lastpos] = tmp;
31187 vec_pairing = 1;
31188 return cached_can_issue_more;
31190 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31191 /* Remember position of first vecload seen. */
31192 vecload_pos = pos;
31194 pos--;
31196 if (vecload_pos >= 0)
31198 /* Didn't find a vector to pair with but did find a vecload,
31199 move it to the end of the ready list. */
31200 tmp = ready[vecload_pos];
31201 for (i = vecload_pos; i < lastpos; i++)
31202 ready[i] = ready[i + 1];
31203 ready[lastpos] = tmp;
31204 vec_pairing = 1;
31205 return cached_can_issue_more;
31209 else if (is_power9_pairable_vec_type (type))
31211 /* Issued a vector operation. */
31212 if (vec_pairing == 0)
31214 int vec_pos = -1;
31215 /* We issued a single vector insn, look for a vecload to pair it
31216 with. If one isn't found, try to pair another vector. */
31217 pos = lastpos;
31218 while (pos >= 0)
31220 if (recog_memoized (ready[pos]) >= 0)
31222 type2 = get_attr_type (ready[pos]);
31223 if (type2 == TYPE_VECLOAD)
31225 /* Found a vecload insn to pair with, move it to the
31226 end of the ready list so it is scheduled next. */
31227 tmp = ready[pos];
31228 for (i = pos; i < lastpos; i++)
31229 ready[i] = ready[i + 1];
31230 ready[lastpos] = tmp;
31231 vec_pairing = 1;
31232 return cached_can_issue_more;
31234 else if (is_power9_pairable_vec_type (type2)
31235 && vec_pos == -1)
31236 /* Remember position of first vector insn seen. */
31237 vec_pos = pos;
31239 pos--;
31241 if (vec_pos >= 0)
31243 /* Didn't find a vecload to pair with but did find a vector
31244 insn, move it to the end of the ready list. */
31245 tmp = ready[vec_pos];
31246 for (i = vec_pos; i < lastpos; i++)
31247 ready[i] = ready[i + 1];
31248 ready[lastpos] = tmp;
31249 vec_pairing = 1;
31250 return cached_can_issue_more;
31255 /* We've either finished a vec/vecload pair, couldn't find an insn to
31256 continue the current pair, or the last insn had nothing to do
31257 with pairing. In any case, reset the state. */
31258 vec_pairing = 0;
31261 return cached_can_issue_more;
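/* Illustrative sketch, not GCC code: power9_sched_reorder2 repeatedly
   moves a chosen insn to the end of the ready array because the
   scheduler issues from the end of the list.  The rotation idiom it
   uses is just this:  */
#if 0
#include <stdio.h>

/* Rotate the element at POS up to LASTPOS, shifting the rest down.  */
static void
move_to_end (int *ready, int pos, int lastpos)
{
  int i, tmp = ready[pos];
  for (i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];
  ready[lastpos] = tmp;
}

int
main (void)
{
  int i, ready[5] = { 10, 11, 12, 13, 14 };
  move_to_end (ready, 1, 4);    /* 11 becomes the next insn to issue */
  for (i = 0; i < 5; i++)
    printf ("%d ", ready[i]);   /* prints: 10 12 13 14 11 */
  printf ("\n");
  return 0;
}
#endif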
31264 /* We are about to begin issuing insns for this clock cycle. */
31266 static int
31267 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31268 rtx_insn **ready ATTRIBUTE_UNUSED,
31269 int *pn_ready ATTRIBUTE_UNUSED,
31270 int clock_var ATTRIBUTE_UNUSED)
31272 int n_ready = *pn_ready;
31274 if (sched_verbose)
31275 fprintf (dump, "// rs6000_sched_reorder :\n");
31277 /* Reorder the ready list, if the second to last ready insn
31278 is a nonpipelined insn. */
31279 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
31281 if (is_nonpipeline_insn (ready[n_ready - 1])
31282 && (recog_memoized (ready[n_ready - 2]) > 0))
31283 /* Simply swap first two insns. */
31284 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31287 if (rs6000_cpu == PROCESSOR_POWER6)
31288 load_store_pendulum = 0;
31290 return rs6000_issue_rate ();
31293 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31295 static int
31296 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31297 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31299 if (sched_verbose)
31300 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31302 /* For Power6, we need to handle some special cases to try and keep the
31303 store queue from overflowing and triggering expensive flushes.
31305 This code monitors how load and store instructions are being issued
31306 and skews the ready list one way or the other to increase the likelihood
31307 that a desired instruction is issued at the proper time.
31309 A couple of things are done. First, we maintain a "load_store_pendulum"
31310 to track the current state of load/store issue.
31312 - If the pendulum is at zero, then no loads or stores have been
31313 issued in the current cycle so we do nothing.
31315 - If the pendulum is 1, then a single load has been issued in this
31316 cycle and we attempt to locate another load in the ready list to
31317 issue with it.
31319 - If the pendulum is -2, then two stores have already been
31320 issued in this cycle, so we increase the priority of the first load
31321 in the ready list to increase its likelihood of being chosen first
31322 in the next cycle.
31324 - If the pendulum is -1, then a single store has been issued in this
31325 cycle and we attempt to locate another store in the ready list to
31326 issue with it, preferring a store to an adjacent memory location to
31327 facilitate store pairing in the store queue.
31329 - If the pendulum is 2, then two loads have already been
31330 issued in this cycle, so we increase the priority of the first store
31331 in the ready list to increase its likelihood of being chosen first
31332 in the next cycle.
31334 - If the pendulum < -2 or > 2, then do nothing.
31336 Note: This code covers the most common scenarios. There exist
31337 non-load/store instructions which make use of the LSU and which
31338 would need to be accounted for to strictly model the behavior
31339 of the machine. Those instructions are currently unaccounted
31340 for, to help minimize the compile-time overhead of this code. */
31342 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
31344 int pos;
31345 int i;
31346 rtx_insn *tmp;
31347 rtx load_mem, str_mem;
31349 if (is_store_insn (last_scheduled_insn, &str_mem))
31350 /* Issuing a store, swing the load_store_pendulum to the left */
31351 load_store_pendulum--;
31352 else if (is_load_insn (last_scheduled_insn, &load_mem))
31353 /* Issuing a load, swing the load_store_pendulum to the right */
31354 load_store_pendulum++;
31355 else
31356 return cached_can_issue_more;
31358 /* If the pendulum is balanced, or there is only one instruction on
31359 the ready list, then all is well, so return. */
31360 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31361 return cached_can_issue_more;
31363 if (load_store_pendulum == 1)
31365 /* A load has been issued in this cycle. Scan the ready list
31366 for another load to issue with it */
31367 pos = *pn_ready-1;
31369 while (pos >= 0)
31371 if (is_load_insn (ready[pos], &load_mem))
31373 /* Found a load. Move it to the head of the ready list,
31374 and adjust its priority so that it is more likely to
31375 stay there */
31376 tmp = ready[pos];
31377 for (i=pos; i<*pn_ready-1; i++)
31378 ready[i] = ready[i + 1];
31379 ready[*pn_ready-1] = tmp;
31381 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31382 INSN_PRIORITY (tmp)++;
31383 break;
31385 pos--;
31388 else if (load_store_pendulum == -2)
31390 /* Two stores have been issued in this cycle. Increase the
31391 priority of the first load in the ready list to favor it for
31392 issuing in the next cycle. */
31393 pos = *pn_ready-1;
31395 while (pos >= 0)
31397 if (is_load_insn (ready[pos], &load_mem)
31398 && !sel_sched_p ()
31399 && INSN_PRIORITY_KNOWN (ready[pos]))
31401 INSN_PRIORITY (ready[pos])++;
31403 /* Adjust the pendulum to account for the fact that a load
31404 was found and increased in priority. This is to prevent
31405 increasing the priority of multiple loads */
31406 load_store_pendulum--;
31408 break;
31410 pos--;
31413 else if (load_store_pendulum == -1)
31415 /* A store has been issued in this cycle. Scan the ready list for
31416 another store to issue with it, preferring a store to an adjacent
31417 memory location */
31418 int first_store_pos = -1;
31420 pos = *pn_ready-1;
31422 while (pos >= 0)
31424 if (is_store_insn (ready[pos], &str_mem))
31426 rtx str_mem2;
31427 /* Maintain the index of the first store found on the
31428 list */
31429 if (first_store_pos == -1)
31430 first_store_pos = pos;
31432 if (is_store_insn (last_scheduled_insn, &str_mem2)
31433 && adjacent_mem_locations (str_mem, str_mem2))
31435 /* Found an adjacent store. Move it to the head of the
31436 ready list, and adjust its priority so that it is
31437 more likely to stay there */
31438 tmp = ready[pos];
31439 for (i=pos; i<*pn_ready-1; i++)
31440 ready[i] = ready[i + 1];
31441 ready[*pn_ready-1] = tmp;
31443 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31444 INSN_PRIORITY (tmp)++;
31446 first_store_pos = -1;
31448 break;
31451 pos--;
31454 if (first_store_pos >= 0)
31456 /* An adjacent store wasn't found, but a non-adjacent store was,
31457 so move the non-adjacent store to the front of the ready
31458 list, and adjust its priority so that it is more likely to
31459 stay there. */
31460 tmp = ready[first_store_pos];
31461 for (i=first_store_pos; i<*pn_ready-1; i++)
31462 ready[i] = ready[i + 1];
31463 ready[*pn_ready-1] = tmp;
31464 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31465 INSN_PRIORITY (tmp)++;
31468 else if (load_store_pendulum == 2)
31470 /* Two loads have been issued in this cycle. Increase the priority
31471 of the first store in the ready list to favor it for issuing in
31472 the next cycle. */
31473 pos = *pn_ready-1;
31475 while (pos >= 0)
31477 if (is_store_insn (ready[pos], &str_mem)
31478 && !sel_sched_p ()
31479 && INSN_PRIORITY_KNOWN (ready[pos]))
31481 INSN_PRIORITY (ready[pos])++;
31483 /* Adjust the pendulum to account for the fact that a store
31484 was found and increased in priority. This is to prevent
31485 increasing the priority of multiple stores */
31486 load_store_pendulum++;
31488 break;
31490 pos--;
31495 /* Do Power9 dependent reordering if necessary. */
31496 if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
31497 && recog_memoized (last_scheduled_insn) >= 0)
31498 return power9_sched_reorder2 (ready, *pn_ready - 1);
31500 return cached_can_issue_more;
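/* A toy simulation, not GCC code, of the Power6 load/store pendulum
   described above: loads swing it positive, stores negative, and
   rs6000_sched_reorder resets it to zero at each new cycle.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int pendulum = 0;
  const char *issued = "LLS", *p;   /* two loads then a store */

  for (p = issued; *p; p++)
    {
      if (*p == 'L')
        pendulum++;                 /* load issued */
      else if (*p == 'S')
        pendulum--;                 /* store issued */

      if (pendulum == 2)
        printf ("two loads issued: favor a store next\n");
      else if (pendulum == -2)
        printf ("two stores issued: favor a load next\n");
    }
  printf ("final pendulum = %d\n", pendulum);  /* prints 1 */
  return 0;
}
#endif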
31503 /* Return whether the presence of INSN causes a dispatch group termination
31504 of group WHICH_GROUP.
31506 If WHICH_GROUP == current_group, this function will return true if INSN
31507 causes the termination of the current group (i.e., the dispatch group to
31508 which INSN belongs). This means that INSN will be the last insn in the
31509 group it belongs to.
31511 If WHICH_GROUP == previous_group, this function will return true if INSN
31512 causes the termination of the previous group (i.e., the dispatch group that
31513 precedes the group to which INSN belongs). This means that INSN will be
31514 the first insn in the group it belongs to. */
31516 static bool
31517 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31519 bool first, last;
31521 if (! insn)
31522 return false;
31524 first = insn_must_be_first_in_group (insn);
31525 last = insn_must_be_last_in_group (insn);
31527 if (first && last)
31528 return true;
31530 if (which_group == current_group)
31531 return last;
31532 else if (which_group == previous_group)
31533 return first;
31535 return false;
31539 static bool
31540 insn_must_be_first_in_group (rtx_insn *insn)
31542 enum attr_type type;
31544 if (!insn
31545 || NOTE_P (insn)
31546 || DEBUG_INSN_P (insn)
31547 || GET_CODE (PATTERN (insn)) == USE
31548 || GET_CODE (PATTERN (insn)) == CLOBBER)
31549 return false;
31551 switch (rs6000_cpu)
31553 case PROCESSOR_POWER5:
31554 if (is_cracked_insn (insn))
31555 return true;
31556 /* FALLTHRU */
31557 case PROCESSOR_POWER4:
31558 if (is_microcoded_insn (insn))
31559 return true;
31561 if (!rs6000_sched_groups)
31562 return false;
31564 type = get_attr_type (insn);
31566 switch (type)
31568 case TYPE_MFCR:
31569 case TYPE_MFCRF:
31570 case TYPE_MTCR:
31571 case TYPE_DELAYED_CR:
31572 case TYPE_CR_LOGICAL:
31573 case TYPE_MTJMPR:
31574 case TYPE_MFJMPR:
31575 case TYPE_DIV:
31576 case TYPE_LOAD_L:
31577 case TYPE_STORE_C:
31578 case TYPE_ISYNC:
31579 case TYPE_SYNC:
31580 return true;
31581 default:
31582 break;
31584 break;
31585 case PROCESSOR_POWER6:
31586 type = get_attr_type (insn);
31588 switch (type)
31590 case TYPE_EXTS:
31591 case TYPE_CNTLZ:
31592 case TYPE_TRAP:
31593 case TYPE_MUL:
31594 case TYPE_INSERT:
31595 case TYPE_FPCOMPARE:
31596 case TYPE_MFCR:
31597 case TYPE_MTCR:
31598 case TYPE_MFJMPR:
31599 case TYPE_MTJMPR:
31600 case TYPE_ISYNC:
31601 case TYPE_SYNC:
31602 case TYPE_LOAD_L:
31603 case TYPE_STORE_C:
31604 return true;
31605 case TYPE_SHIFT:
31606 if (get_attr_dot (insn) == DOT_NO
31607 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31608 return true;
31609 else
31610 break;
31611 case TYPE_DIV:
31612 if (get_attr_size (insn) == SIZE_32)
31613 return true;
31614 else
31615 break;
31616 case TYPE_LOAD:
31617 case TYPE_STORE:
31618 case TYPE_FPLOAD:
31619 case TYPE_FPSTORE:
31620 if (get_attr_update (insn) == UPDATE_YES)
31621 return true;
31622 else
31623 break;
31624 default:
31625 break;
31627 break;
31628 case PROCESSOR_POWER7:
31629 type = get_attr_type (insn);
31631 switch (type)
31633 case TYPE_CR_LOGICAL:
31634 case TYPE_MFCR:
31635 case TYPE_MFCRF:
31636 case TYPE_MTCR:
31637 case TYPE_DIV:
31638 case TYPE_ISYNC:
31639 case TYPE_LOAD_L:
31640 case TYPE_STORE_C:
31641 case TYPE_MFJMPR:
31642 case TYPE_MTJMPR:
31643 return true;
31644 case TYPE_MUL:
31645 case TYPE_SHIFT:
31646 case TYPE_EXTS:
31647 if (get_attr_dot (insn) == DOT_YES)
31648 return true;
31649 else
31650 break;
31651 case TYPE_LOAD:
31652 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31653 || get_attr_update (insn) == UPDATE_YES)
31654 return true;
31655 else
31656 break;
31657 case TYPE_STORE:
31658 case TYPE_FPLOAD:
31659 case TYPE_FPSTORE:
31660 if (get_attr_update (insn) == UPDATE_YES)
31661 return true;
31662 else
31663 break;
31664 default:
31665 break;
31667 break;
31668 case PROCESSOR_POWER8:
31669 type = get_attr_type (insn);
31671 switch (type)
31673 case TYPE_CR_LOGICAL:
31674 case TYPE_DELAYED_CR:
31675 case TYPE_MFCR:
31676 case TYPE_MFCRF:
31677 case TYPE_MTCR:
31678 case TYPE_SYNC:
31679 case TYPE_ISYNC:
31680 case TYPE_LOAD_L:
31681 case TYPE_STORE_C:
31682 case TYPE_VECSTORE:
31683 case TYPE_MFJMPR:
31684 case TYPE_MTJMPR:
31685 return true;
31686 case TYPE_SHIFT:
31687 case TYPE_EXTS:
31688 case TYPE_MUL:
31689 if (get_attr_dot (insn) == DOT_YES)
31690 return true;
31691 else
31692 break;
31693 case TYPE_LOAD:
31694 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31695 || get_attr_update (insn) == UPDATE_YES)
31696 return true;
31697 else
31698 break;
31699 case TYPE_STORE:
31700 if (get_attr_update (insn) == UPDATE_YES
31701 && get_attr_indexed (insn) == INDEXED_YES)
31702 return true;
31703 else
31704 break;
31705 default:
31706 break;
31708 break;
31709 default:
31710 break;
31713 return false;
31716 static bool
31717 insn_must_be_last_in_group (rtx_insn *insn)
31719 enum attr_type type;
31721 if (!insn
31722 || NOTE_P (insn)
31723 || DEBUG_INSN_P (insn)
31724 || GET_CODE (PATTERN (insn)) == USE
31725 || GET_CODE (PATTERN (insn)) == CLOBBER)
31726 return false;
31728 switch (rs6000_cpu) {
31729 case PROCESSOR_POWER4:
31730 case PROCESSOR_POWER5:
31731 if (is_microcoded_insn (insn))
31732 return true;
31734 if (is_branch_slot_insn (insn))
31735 return true;
31737 break;
31738 case PROCESSOR_POWER6:
31739 type = get_attr_type (insn);
31741 switch (type)
31743 case TYPE_EXTS:
31744 case TYPE_CNTLZ:
31745 case TYPE_TRAP:
31746 case TYPE_MUL:
31747 case TYPE_FPCOMPARE:
31748 case TYPE_MFCR:
31749 case TYPE_MTCR:
31750 case TYPE_MFJMPR:
31751 case TYPE_MTJMPR:
31752 case TYPE_ISYNC:
31753 case TYPE_SYNC:
31754 case TYPE_LOAD_L:
31755 case TYPE_STORE_C:
31756 return true;
31757 case TYPE_SHIFT:
31758 if (get_attr_dot (insn) == DOT_NO
31759 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31760 return true;
31761 else
31762 break;
31763 case TYPE_DIV:
31764 if (get_attr_size (insn) == SIZE_32)
31765 return true;
31766 else
31767 break;
31768 default:
31769 break;
31771 break;
31772 case PROCESSOR_POWER7:
31773 type = get_attr_type (insn);
31775 switch (type)
31777 case TYPE_ISYNC:
31778 case TYPE_SYNC:
31779 case TYPE_LOAD_L:
31780 case TYPE_STORE_C:
31781 return true;
31782 case TYPE_LOAD:
31783 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31784 && get_attr_update (insn) == UPDATE_YES)
31785 return true;
31786 else
31787 break;
31788 case TYPE_STORE:
31789 if (get_attr_update (insn) == UPDATE_YES
31790 && get_attr_indexed (insn) == INDEXED_YES)
31791 return true;
31792 else
31793 break;
31794 default:
31795 break;
31797 break;
31798 case PROCESSOR_POWER8:
31799 type = get_attr_type (insn);
31801 switch (type)
31803 case TYPE_MFCR:
31804 case TYPE_MTCR:
31805 case TYPE_ISYNC:
31806 case TYPE_SYNC:
31807 case TYPE_LOAD_L:
31808 case TYPE_STORE_C:
31809 return true;
31810 case TYPE_LOAD:
31811 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31812 && get_attr_update (insn) == UPDATE_YES)
31813 return true;
31814 else
31815 break;
31816 case TYPE_STORE:
31817 if (get_attr_update (insn) == UPDATE_YES
31818 && get_attr_indexed (insn) == INDEXED_YES)
31819 return true;
31820 else
31821 break;
31822 default:
31823 break;
31825 break;
31826 default:
31827 break;
31830 return false;
31833 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31834 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31836 static bool
31837 is_costly_group (rtx *group_insns, rtx next_insn)
31839 int i;
31840 int issue_rate = rs6000_issue_rate ();
31842 for (i = 0; i < issue_rate; i++)
31844 sd_iterator_def sd_it;
31845 dep_t dep;
31846 rtx insn = group_insns[i];
31848 if (!insn)
31849 continue;
31851 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31853 rtx next = DEP_CON (dep);
31855 if (next == next_insn
31856 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31857 return true;
31861 return false;
31864 /* Utility function used by redefine_groups.
31865 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31866 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31867 to keep it "far" (in a separate group) from GROUP_INSNS, following
31868 one of the following schemes, depending on the value of the flag
31869 -minsert-sched-nops = X:
31870 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31871 in order to force NEXT_INSN into a separate group.
31872 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31873 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31874 insertion (has a group just ended, how many vacant issue slots remain in the
31875 last group, and how many dispatch groups were encountered so far). */
31877 static int
31878 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31879 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31880 int *group_count)
31882 rtx nop;
31883 bool force;
31884 int issue_rate = rs6000_issue_rate ();
31885 bool end = *group_end;
31886 int i;
31888 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31889 return can_issue_more;
31891 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31892 return can_issue_more;
31894 force = is_costly_group (group_insns, next_insn);
31895 if (!force)
31896 return can_issue_more;
31898 if (sched_verbose > 6)
31899 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
31900 *group_count ,can_issue_more);
31902 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31904 if (*group_end)
31905 can_issue_more = 0;
31907 /* Since only a branch can be issued in the last issue_slot, it is
31908 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31909 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31910 in this case the last nop will start a new group and the branch
31911 will be forced to the new group. */
31912 if (can_issue_more && !is_branch_slot_insn (next_insn))
31913 can_issue_more--;
31915 /* Do we have a special group ending nop? */
31916 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
31917 || rs6000_cpu_attr == CPU_POWER8)
31919 nop = gen_group_ending_nop ();
31920 emit_insn_before (nop, next_insn);
31921 can_issue_more = 0;
31923 else
31924 while (can_issue_more > 0)
31926 nop = gen_nop ();
31927 emit_insn_before (nop, next_insn);
31928 can_issue_more--;
31931 *group_end = true;
31932 return 0;
31935 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31937 int n_nops = rs6000_sched_insert_nops;
31939 /* Nops can't be issued from the branch slot, so the effective
31940 issue_rate for nops is 'issue_rate - 1'. */
31941 if (can_issue_more == 0)
31942 can_issue_more = issue_rate;
31943 can_issue_more--;
31944 if (can_issue_more == 0)
31946 can_issue_more = issue_rate - 1;
31947 (*group_count)++;
31948 end = true;
31949 for (i = 0; i < issue_rate; i++)
31951 group_insns[i] = 0;
31955 while (n_nops > 0)
31957 nop = gen_nop ();
31958 emit_insn_before (nop, next_insn);
31959 if (can_issue_more == issue_rate - 1) /* new group begins */
31960 end = false;
31961 can_issue_more--;
31962 if (can_issue_more == 0)
31964 can_issue_more = issue_rate - 1;
31965 (*group_count)++;
31966 end = true;
31967 for (i = 0; i < issue_rate; i++)
31969 group_insns[i] = 0;
31972 n_nops--;
31975 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31976 can_issue_more++;
31978 /* Is next_insn going to start a new group? */
31979 *group_end
31980 = (end
31981 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31982 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31983 || (can_issue_more < issue_rate &&
31984 insn_terminates_group_p (next_insn, previous_group)));
31985 if (*group_end && end)
31986 (*group_count)--;
31988 if (sched_verbose > 6)
31989 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31990 *group_count, can_issue_more);
31991 return can_issue_more;
31994 return can_issue_more;
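/* A loose sketch, not GCC code, of the bookkeeping for scheme (2) above:
   nops cannot occupy the branch slot, so while the X requested nops are
   emitted the effective issue rate is issue_rate - 1, and a fresh group
   starts whenever the slots run out.  All values here are invented.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int issue_rate = 5;        /* e.g. a POWER5-class dispatch width */
  int can_issue_more = 2;    /* vacant slots left in the current group */
  int n_nops = 6, groups_started = 0;

  /* As in force_new_group, reserve a slot so the next real insn can
     still follow the nops.  */
  can_issue_more--;

  while (n_nops-- > 0)
    {
      can_issue_more--;      /* one nop fills one slot */
      if (can_issue_more <= 0)
        {
          can_issue_more = issue_rate - 1;  /* branch slot excluded */
          groups_started++;
        }
    }
  printf ("new groups started: %d\n", groups_started);  /* prints 2 */
  return 0;
}
#endif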
31997 /* This function tries to synchronize the dispatch groups that the compiler "sees"
31998 with the dispatch groups that the processor dispatcher is expected to
31999 form in practice. It tries to achieve this synchronization by forcing the
32000 estimated processor grouping on the compiler (as opposed to the function
32001 'pad_groups' which tries to force the scheduler's grouping on the processor).
32003 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32004 examines the (estimated) dispatch groups that will be formed by the processor
32005 dispatcher. It marks these group boundaries to reflect the estimated
32006 processor grouping, overriding the grouping that the scheduler had marked.
32007 Depending on the value of the flag '-minsert-sched-nops' this function can
32008 force certain insns into separate groups or force a certain distance between
32009 them by inserting nops, for example, if there exists a "costly dependence"
32010 between the insns.
32012 The function estimates the group boundaries that the processor will form as
32013 follows: It keeps track of how many vacant issue slots are available after
32014 each insn. A subsequent insn will start a new group if one of the following
32015 4 cases applies:
32016 - no more vacant issue slots remain in the current dispatch group.
32017 - only the last issue slot, which is the branch slot, is vacant, but the next
32018 insn is not a branch.
32019 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32020 which means that a cracked insn (which occupies two issue slots) can't be
32021 issued in this group.
32022 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
32023 start a new group. */
32025 static int
32026 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32027 rtx_insn *tail)
32029 rtx_insn *insn, *next_insn;
32030 int issue_rate;
32031 int can_issue_more;
32032 int slot, i;
32033 bool group_end;
32034 int group_count = 0;
32035 rtx *group_insns;
32037 /* Initialize. */
32038 issue_rate = rs6000_issue_rate ();
32039 group_insns = XALLOCAVEC (rtx, issue_rate);
32040 for (i = 0; i < issue_rate; i++)
32042 group_insns[i] = 0;
32044 can_issue_more = issue_rate;
32045 slot = 0;
32046 insn = get_next_active_insn (prev_head_insn, tail);
32047 group_end = false;
32049 while (insn != NULL_RTX)
32051 slot = (issue_rate - can_issue_more);
32052 group_insns[slot] = insn;
32053 can_issue_more =
32054 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32055 if (insn_terminates_group_p (insn, current_group))
32056 can_issue_more = 0;
32058 next_insn = get_next_active_insn (insn, tail);
32059 if (next_insn == NULL_RTX)
32060 return group_count + 1;
32062 /* Is next_insn going to start a new group? */
32063 group_end
32064 = (can_issue_more == 0
32065 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32066 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32067 || (can_issue_more < issue_rate &&
32068 insn_terminates_group_p (next_insn, previous_group)));
32070 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32071 next_insn, &group_end, can_issue_more,
32072 &group_count);
32074 if (group_end)
32076 group_count++;
32077 can_issue_more = 0;
32078 for (i = 0; i < issue_rate; i++)
32080 group_insns[i] = 0;
32084 if (GET_MODE (next_insn) == TImode && can_issue_more)
32085 PUT_MODE (next_insn, VOIDmode);
32086 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32087 PUT_MODE (next_insn, TImode);
32089 insn = next_insn;
32090 if (can_issue_more == 0)
32091 can_issue_more = issue_rate;
32092 } /* while */
32094 return group_count;
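/* An illustrative condensation, not GCC code, of the four group-end
   cases listed in the comment before redefine_groups.  The boolean
   parameters stand in for the is_branch_slot_insn, is_cracked_insn and
   insn_terminates_group_p queries on the next insn.  */
#if 0
#include <stdbool.h>

static bool
starts_new_group (int vacant, int issue_rate, bool next_is_branch,
                  bool next_is_cracked, bool next_must_be_first)
{
  return (vacant == 0                              /* no slots left */
          || (vacant == 1 && !next_is_branch)      /* only the branch slot */
          || (vacant <= 2 && next_is_cracked)      /* cracked needs 2 slots */
          || (vacant < issue_rate && next_must_be_first));
}

int
main (void)
{
  /* Two vacant slots but the next insn is cracked: a new group starts.  */
  return starts_new_group (2, 5, false, true, false) ? 0 : 1;
}
#endif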
32097 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32098 dispatch group boundaries that the scheduler had marked. Pad with nops
32099 any dispatch groups which have vacant issue slots, in order to force the
32100 scheduler's grouping on the processor dispatcher. The function
32101 returns the number of dispatch groups found. */
32103 static int
32104 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32105 rtx_insn *tail)
32107 rtx_insn *insn, *next_insn;
32108 rtx nop;
32109 int issue_rate;
32110 int can_issue_more;
32111 int group_end;
32112 int group_count = 0;
32114 /* Initialize issue_rate. */
32115 issue_rate = rs6000_issue_rate ();
32116 can_issue_more = issue_rate;
32118 insn = get_next_active_insn (prev_head_insn, tail);
32119 next_insn = get_next_active_insn (insn, tail);
32121 while (insn != NULL_RTX)
32123 can_issue_more =
32124 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32126 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32128 if (next_insn == NULL_RTX)
32129 break;
32131 if (group_end)
32133 /* If the scheduler had marked group termination at this location
32134 (between insn and next_insn), and neither insn nor next_insn will
32135 force group termination, pad the group with nops to force group
32136 termination. */
32137 if (can_issue_more
32138 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32139 && !insn_terminates_group_p (insn, current_group)
32140 && !insn_terminates_group_p (next_insn, previous_group))
32142 if (!is_branch_slot_insn (next_insn))
32143 can_issue_more--;
32145 while (can_issue_more)
32147 nop = gen_nop ();
32148 emit_insn_before (nop, next_insn);
32149 can_issue_more--;
32153 can_issue_more = issue_rate;
32154 group_count++;
32157 insn = next_insn;
32158 next_insn = get_next_active_insn (insn, tail);
32161 return group_count;
32164 /* We're beginning a new block. Initialize data structures as necessary. */
32166 static void
32167 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32168 int sched_verbose ATTRIBUTE_UNUSED,
32169 int max_ready ATTRIBUTE_UNUSED)
32171 last_scheduled_insn = NULL;
32172 load_store_pendulum = 0;
32173 divide_cnt = 0;
32174 vec_pairing = 0;
32177 /* The following function is called at the end of scheduling a BB.
32178 After reload, it inserts nops to enforce insn group bundling. */
32180 static void
32181 rs6000_sched_finish (FILE *dump, int sched_verbose)
32183 int n_groups;
32185 if (sched_verbose)
32186 fprintf (dump, "=== Finishing schedule.\n");
32188 if (reload_completed && rs6000_sched_groups)
32190 /* Do not run sched_finish hook when selective scheduling enabled. */
32191 if (sel_sched_p ())
32192 return;
32194 if (rs6000_sched_insert_nops == sched_finish_none)
32195 return;
32197 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32198 n_groups = pad_groups (dump, sched_verbose,
32199 current_sched_info->prev_head,
32200 current_sched_info->next_tail);
32201 else
32202 n_groups = redefine_groups (dump, sched_verbose,
32203 current_sched_info->prev_head,
32204 current_sched_info->next_tail);
32206 if (sched_verbose >= 6)
32208 fprintf (dump, "ngroups = %d\n", n_groups);
32209 print_rtl (dump, current_sched_info->prev_head);
32210 fprintf (dump, "Done finish_sched\n");
32215 struct rs6000_sched_context
32217 short cached_can_issue_more;
32218 rtx_insn *last_scheduled_insn;
32219 int load_store_pendulum;
32220 int divide_cnt;
32221 int vec_pairing;
32224 typedef struct rs6000_sched_context rs6000_sched_context_def;
32225 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32227 /* Allocate store for new scheduling context. */
32228 static void *
32229 rs6000_alloc_sched_context (void)
32231 return xmalloc (sizeof (rs6000_sched_context_def));
32234 /* If CLEAN_P is true, initialize _SC with clean data;
32235 otherwise initialize it from the global context. */
32236 static void
32237 rs6000_init_sched_context (void *_sc, bool clean_p)
32239 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32241 if (clean_p)
32243 sc->cached_can_issue_more = 0;
32244 sc->last_scheduled_insn = NULL;
32245 sc->load_store_pendulum = 0;
32246 sc->divide_cnt = 0;
32247 sc->vec_pairing = 0;
32249 else
32251 sc->cached_can_issue_more = cached_can_issue_more;
32252 sc->last_scheduled_insn = last_scheduled_insn;
32253 sc->load_store_pendulum = load_store_pendulum;
32254 sc->divide_cnt = divide_cnt;
32255 sc->vec_pairing = vec_pairing;
32259 /* Sets the global scheduling context to the one pointed to by _SC. */
32260 static void
32261 rs6000_set_sched_context (void *_sc)
32263 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32265 gcc_assert (sc != NULL);
32267 cached_can_issue_more = sc->cached_can_issue_more;
32268 last_scheduled_insn = sc->last_scheduled_insn;
32269 load_store_pendulum = sc->load_store_pendulum;
32270 divide_cnt = sc->divide_cnt;
32271 vec_pairing = sc->vec_pairing;
32274 /* Free _SC. */
32275 static void
32276 rs6000_free_sched_context (void *_sc)
32278 gcc_assert (_sc != NULL);
32280 free (_sc);
32283 static bool
32284 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32286 switch (get_attr_type (insn))
32288 case TYPE_DIV:
32289 case TYPE_SDIV:
32290 case TYPE_DDIV:
32291 case TYPE_VECDIV:
32292 case TYPE_SSQRT:
32293 case TYPE_DSQRT:
32294 return false;
32296 default:
32297 return true;
32301 /* Length in units of the trampoline for entering a nested function. */
32304 rs6000_trampoline_size (void)
32306 int ret = 0;
32308 switch (DEFAULT_ABI)
32310 default:
32311 gcc_unreachable ();
32313 case ABI_AIX:
32314 ret = (TARGET_32BIT) ? 12 : 24;
32315 break;
32317 case ABI_ELFv2:
32318 gcc_assert (!TARGET_32BIT);
32319 ret = 32;
32320 break;
32322 case ABI_DARWIN:
32323 case ABI_V4:
32324 ret = (TARGET_32BIT) ? 40 : 48;
32325 break;
32328 return ret;
32331 /* Emit RTL insns to initialize the variable parts of a trampoline.
32332 FNADDR is an RTX for the address of the function's pure code.
32333 CXT is an RTX for the static chain value for the function. */
32335 static void
32336 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32338 int regsize = (TARGET_32BIT) ? 4 : 8;
32339 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32340 rtx ctx_reg = force_reg (Pmode, cxt);
32341 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32343 switch (DEFAULT_ABI)
32345 default:
32346 gcc_unreachable ();
32348 /* Under AIX, just build the 3-word function descriptor. */
32349 case ABI_AIX:
32351 rtx fnmem, fn_reg, toc_reg;
32353 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32354 error ("you cannot take the address of a nested function if you use "
32355 "the %qs option", "-mno-pointers-to-nested-functions");
32357 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32358 fn_reg = gen_reg_rtx (Pmode);
32359 toc_reg = gen_reg_rtx (Pmode);
32361 /* Macro to shorten the code expansions below. */
32362 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32364 m_tramp = replace_equiv_address (m_tramp, addr);
32366 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32367 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32368 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32369 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32370 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32372 # undef MEM_PLUS
32374 break;
32376 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32377 case ABI_ELFv2:
32378 case ABI_DARWIN:
32379 case ABI_V4:
32380 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32381 LCT_NORMAL, VOIDmode,
32382 addr, Pmode,
32383 GEN_INT (rs6000_trampoline_size ()), SImode,
32384 fnaddr, Pmode,
32385 ctx_reg, Pmode);
32386 break;
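/* Illustrative sketch, not GCC code: under the AIX ABI the trampoline
   filled in above is a three-word function descriptor.  Conceptually it
   looks like this invented struct, with regsize-wide words:  */
#if 0
struct aix_func_desc
{
  void *entry;         /* code address, copied from the callee's descriptor */
  void *toc;           /* TOC pointer, copied likewise */
  void *static_chain;  /* context value for the nested function */
};
#endif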
32391 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32392 identifier as an argument, so the front end shouldn't look it up. */
32394 static bool
32395 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32397 return is_attribute_p ("altivec", attr_id);
32400 /* Handle the "altivec" attribute. The attribute may have
32401 arguments as follows:
32403 __attribute__((altivec(vector__)))
32404 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32405 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32407 and may appear more than once (e.g., 'vector bool char') in a
32408 given declaration. */
32410 static tree
32411 rs6000_handle_altivec_attribute (tree *node,
32412 tree name ATTRIBUTE_UNUSED,
32413 tree args,
32414 int flags ATTRIBUTE_UNUSED,
32415 bool *no_add_attrs)
32417 tree type = *node, result = NULL_TREE;
32418 machine_mode mode;
32419 int unsigned_p;
32420 char altivec_type
32421 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32422 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32423 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32424 : '?');
32426 while (POINTER_TYPE_P (type)
32427 || TREE_CODE (type) == FUNCTION_TYPE
32428 || TREE_CODE (type) == METHOD_TYPE
32429 || TREE_CODE (type) == ARRAY_TYPE)
32430 type = TREE_TYPE (type);
32432 mode = TYPE_MODE (type);
32434 /* Check for invalid AltiVec type qualifiers. */
32435 if (type == long_double_type_node)
32436 error ("use of %<long double%> in AltiVec types is invalid");
32437 else if (type == boolean_type_node)
32438 error ("use of boolean types in AltiVec types is invalid");
32439 else if (TREE_CODE (type) == COMPLEX_TYPE)
32440 error ("use of %<complex%> in AltiVec types is invalid");
32441 else if (DECIMAL_FLOAT_MODE_P (mode))
32442 error ("use of decimal floating point types in AltiVec types is invalid");
32443 else if (!TARGET_VSX)
32445 if (type == long_unsigned_type_node || type == long_integer_type_node)
32447 if (TARGET_64BIT)
32448 error ("use of %<long%> in AltiVec types is invalid for "
32449 "64-bit code without %qs", "-mvsx");
32450 else if (rs6000_warn_altivec_long)
32451 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32452 "use %<int%>");
32454 else if (type == long_long_unsigned_type_node
32455 || type == long_long_integer_type_node)
32456 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32457 "-mvsx");
32458 else if (type == double_type_node)
32459 error ("use of %<double%> in AltiVec types is invalid without %qs",
32460 "-mvsx");
32463 switch (altivec_type)
32465 case 'v':
32466 unsigned_p = TYPE_UNSIGNED (type);
32467 switch (mode)
32469 case E_TImode:
32470 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32471 break;
32472 case E_DImode:
32473 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32474 break;
32475 case E_SImode:
32476 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32477 break;
32478 case E_HImode:
32479 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32480 break;
32481 case E_QImode:
32482 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32483 break;
32484 case E_SFmode: result = V4SF_type_node; break;
32485 case E_DFmode: result = V2DF_type_node; break;
32486 /* If the user says 'vector int bool', we may be handed the 'bool'
32487 attribute _before_ the 'vector' attribute, and so select the
32488 proper type in the 'b' case below. */
32489 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32490 case E_V2DImode: case E_V2DFmode:
32491 result = type;
32492 default: break;
32494 break;
32495 case 'b':
32496 switch (mode)
32498 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32499 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32500 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32501 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32502 default: break;
32504 break;
32505 case 'p':
32506 switch (mode)
32508 case E_V8HImode: result = pixel_V8HI_type_node;
32509 default: break;
32511 default: break;
32514 /* Propagate qualifiers attached to the element type
32515 onto the vector type. */
32516 if (result && result != type && TYPE_QUALS (type))
32517 result = build_qualified_type (result, TYPE_QUALS (type));
32519 *no_add_attrs = true; /* No need to hang on to the attribute. */
32521 if (result)
32522 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32524 return NULL_TREE;
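/* Usage illustration, a sketch rather than GCC-internal code: the
   attribute forms handled above are roughly what the 'vector', 'bool'
   and 'pixel' context-sensitive keywords expand to.  */
#if 0
/* 'vector unsigned int':  */
__attribute__((altivec(vector__))) unsigned int v;
/* 'vector bool char' applies two altivec attributes:  */
__attribute__((altivec(vector__))) __attribute__((altivec(bool__))) unsigned char b;
/* 'vector pixel':  */
__attribute__((altivec(vector__))) __attribute__((altivec(pixel__))) unsigned short p;
#endif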
32527 /* AltiVec defines four built-in scalar types that serve as vector
32528 elements; we must teach the compiler how to mangle them. */
32530 static const char *
32531 rs6000_mangle_type (const_tree type)
32533 type = TYPE_MAIN_VARIANT (type);
32535 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32536 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32537 return NULL;
32539 if (type == bool_char_type_node) return "U6__boolc";
32540 if (type == bool_short_type_node) return "U6__bools";
32541 if (type == pixel_type_node) return "u7__pixel";
32542 if (type == bool_int_type_node) return "U6__booli";
32543 if (type == bool_long_type_node) return "U6__booll";
32545 /* Use a unique name for __float128 rather than trying to use "e" or "g". Use
32546 "g" for IBM extended double, no matter whether it is long double (using
32547 -mabi=ibmlongdouble) or the distinct __ibm128 type. */
32548 if (TARGET_FLOAT128_TYPE)
32550 if (type == ieee128_float_type_node)
32551 return "U10__float128";
32553 if (TARGET_LONG_DOUBLE_128)
32555 if (type == long_double_type_node)
32556 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
32558 if (type == ibm128_float_type_node)
32559 return "g";
32563 /* Mangle IBM extended float long double as `g' (__float128) on
32564 powerpc*-linux where long-double-64 previously was the default. */
32565 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
32566 && TARGET_ELF
32567 && TARGET_LONG_DOUBLE_128
32568 && !TARGET_IEEEQUAD)
32569 return "g";
32571 /* For all other types, use normal C++ mangling. */
32572 return NULL;
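/* A minimal sketch, not GCC code, of the fixed mangling table above,
   written as a plain string map; toy_mangle is invented for
   illustration.  */
#if 0
#include <stdio.h>
#include <string.h>

static const char *
toy_mangle (const char *type_name)
{
  static const struct { const char *name, *mangled; } table[] = {
    { "__bool char",  "U6__boolc" },
    { "__bool short", "U6__bools" },
    { "__pixel",      "u7__pixel" },
    { "__bool int",   "U6__booli" },
    { "__bool long",  "U6__booll" },
  };
  size_t i;
  for (i = 0; i < sizeof table / sizeof table[0]; i++)
    if (strcmp (type_name, table[i].name) == 0)
      return table[i].mangled;
  return NULL;  /* fall back to normal C++ mangling */
}

int
main (void)
{
  printf ("%s\n", toy_mangle ("__pixel"));  /* prints u7__pixel */
  return 0;
}
#endif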
32575 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32576 struct attribute_spec.handler. */
32578 static tree
32579 rs6000_handle_longcall_attribute (tree *node, tree name,
32580 tree args ATTRIBUTE_UNUSED,
32581 int flags ATTRIBUTE_UNUSED,
32582 bool *no_add_attrs)
32584 if (TREE_CODE (*node) != FUNCTION_TYPE
32585 && TREE_CODE (*node) != FIELD_DECL
32586 && TREE_CODE (*node) != TYPE_DECL)
32588 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32589 name);
32590 *no_add_attrs = true;
32593 return NULL_TREE;
32596 /* Set longcall attributes on all functions declared when
32597 rs6000_default_long_calls is true. */
32598 static void
32599 rs6000_set_default_type_attributes (tree type)
32601 if (rs6000_default_long_calls
32602 && (TREE_CODE (type) == FUNCTION_TYPE
32603 || TREE_CODE (type) == METHOD_TYPE))
32604 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32605 NULL_TREE,
32606 TYPE_ATTRIBUTES (type));
32608 #if TARGET_MACHO
32609 darwin_set_default_type_attributes (type);
32610 #endif
32613 /* Return a reference suitable for calling a function with the
32614 longcall attribute. */
32617 rs6000_longcall_ref (rtx call_ref)
32619 const char *call_name;
32620 tree node;
32622 if (GET_CODE (call_ref) != SYMBOL_REF)
32623 return call_ref;
32625 /* System V adds '.' to the internal name, so skip any leading dots. */
32626 call_name = XSTR (call_ref, 0);
32627 if (*call_name == '.')
32629 while (*call_name == '.')
32630 call_name++;
32632 node = get_identifier (call_name);
32633 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32636 return force_reg (Pmode, call_ref);
32639 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32640 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32641 #endif
32643 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32644 struct attribute_spec.handler. */
32645 static tree
32646 rs6000_handle_struct_attribute (tree *node, tree name,
32647 tree args ATTRIBUTE_UNUSED,
32648 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32650 tree *type = NULL;
32651 if (DECL_P (*node))
32653 if (TREE_CODE (*node) == TYPE_DECL)
32654 type = &TREE_TYPE (*node);
32656 else
32657 type = node;
32659 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32660 || TREE_CODE (*type) == UNION_TYPE)))
32662 warning (OPT_Wattributes, "%qE attribute ignored", name);
32663 *no_add_attrs = true;
32666 else if ((is_attribute_p ("ms_struct", name)
32667 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32668 || ((is_attribute_p ("gcc_struct", name)
32669 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32671 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32672 name);
32673 *no_add_attrs = true;
32676 return NULL_TREE;
32679 static bool
32680 rs6000_ms_bitfield_layout_p (const_tree record_type)
32682 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
32683 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32684 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32687 #ifdef USING_ELFOS_H
32689 /* A get_unnamed_section callback, used for switching to toc_section. */
32691 static void
32692 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32694 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32695 && TARGET_MINIMAL_TOC)
32697 if (!toc_initialized)
32699 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32700 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32701 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32702 fprintf (asm_out_file, "\t.tc ");
32703 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32704 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32705 fprintf (asm_out_file, "\n");
32707 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32708 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32709 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32710 fprintf (asm_out_file, " = .+32768\n");
32711 toc_initialized = 1;
32713 else
32714 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32716 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32718 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32719 if (!toc_initialized)
32721 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32722 toc_initialized = 1;
32725 else
32727 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32728 if (!toc_initialized)
32730 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32731 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32732 fprintf (asm_out_file, " = .+32768\n");
32733 toc_initialized = 1;
32738 /* Implement TARGET_ASM_INIT_SECTIONS. */
32740 static void
32741 rs6000_elf_asm_init_sections (void)
32743 toc_section
32744 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32746 sdata2_section
32747 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32748 SDATA2_SECTION_ASM_OP);
32751 /* Implement TARGET_SELECT_RTX_SECTION. */
32753 static section *
32754 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32755 unsigned HOST_WIDE_INT align)
32757 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32758 return toc_section;
32759 else
32760 return default_elf_select_rtx_section (mode, x, align);
32763 /* For a SYMBOL_REF, set generic flags and then perform some
32764 target-specific processing.
32766 When the AIX ABI is requested on a non-AIX system, replace the
32767 function name with the real name (with a leading .) rather than the
32768 function descriptor name. This saves a lot of overriding code to
32769 read the prefixes. */
32771 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32772 static void
32773 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32775 default_encode_section_info (decl, rtl, first);
32777 if (first
32778 && TREE_CODE (decl) == FUNCTION_DECL
32779 && !TARGET_AIX
32780 && DEFAULT_ABI == ABI_AIX)
32782 rtx sym_ref = XEXP (rtl, 0);
32783 size_t len = strlen (XSTR (sym_ref, 0));
32784 char *str = XALLOCAVEC (char, len + 2);
32785 str[0] = '.';
32786 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32787 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32791 static inline bool
32792 compare_section_name (const char *section, const char *templ)
32794 int len;
32796 len = strlen (templ);
32797 return (strncmp (section, templ, len) == 0
32798 && (section[len] == 0 || section[len] == '.'));
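/* A small demo, not GCC code, of the matching rule above: the section
   name must equal TEMPL exactly or extend it with a '.' suffix, so
   ".sdata" matches ".sdata" and ".sdata.foo" but not ".sdata2".  */
#if 0
#include <stdio.h>
#include <string.h>

static int
toy_compare_section_name (const char *section, const char *templ)
{
  size_t len = strlen (templ);
  return (strncmp (section, templ, len) == 0
          && (section[len] == 0 || section[len] == '.'));
}

int
main (void)
{
  printf ("%d\n", toy_compare_section_name (".sdata.foo", ".sdata")); /* 1 */
  printf ("%d\n", toy_compare_section_name (".sdata2", ".sdata"));    /* 0 */
  return 0;
}
#endif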
32801 bool
32802 rs6000_elf_in_small_data_p (const_tree decl)
32804 if (rs6000_sdata == SDATA_NONE)
32805 return false;
32807 /* We want to merge strings, so we never consider them small data. */
32808 if (TREE_CODE (decl) == STRING_CST)
32809 return false;
32811 /* Functions are never in the small data area. */
32812 if (TREE_CODE (decl) == FUNCTION_DECL)
32813 return false;
32815 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32817 const char *section = DECL_SECTION_NAME (decl);
32818 if (compare_section_name (section, ".sdata")
32819 || compare_section_name (section, ".sdata2")
32820 || compare_section_name (section, ".gnu.linkonce.s")
32821 || compare_section_name (section, ".sbss")
32822 || compare_section_name (section, ".sbss2")
32823 || compare_section_name (section, ".gnu.linkonce.sb")
32824 || strcmp (section, ".PPC.EMB.sdata0") == 0
32825 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32826 return true;
32828 else
32830 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32832 if (size > 0
32833 && size <= g_switch_value
32834 /* If it's not public, and we're not going to reference it there,
32835 there's no need to put it in the small data section. */
32836 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32837 return true;
32840 return false;
32843 #endif /* USING_ELFOS_H */
32845 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32847 static bool
32848 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32850 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32853 /* Do not place thread-local symbols refs in the object blocks. */
32855 static bool
32856 rs6000_use_blocks_for_decl_p (const_tree decl)
32858 return !DECL_THREAD_LOCAL_P (decl);
32861 /* Return a REG that occurs in ADDR with coefficient 1.
32862 ADDR can be effectively incremented by incrementing REG.
32864 r0 is special and we must not select it as an address
32865 register by this routine since our caller will try to
32866 increment the returned register via an "la" instruction. */
32869 find_addr_reg (rtx addr)
32871 while (GET_CODE (addr) == PLUS)
32873 if (GET_CODE (XEXP (addr, 0)) == REG
32874 && REGNO (XEXP (addr, 0)) != 0)
32875 addr = XEXP (addr, 0);
32876 else if (GET_CODE (XEXP (addr, 1)) == REG
32877 && REGNO (XEXP (addr, 1)) != 0)
32878 addr = XEXP (addr, 1);
32879 else if (CONSTANT_P (XEXP (addr, 0)))
32880 addr = XEXP (addr, 1);
32881 else if (CONSTANT_P (XEXP (addr, 1)))
32882 addr = XEXP (addr, 0);
32883 else
32884 gcc_unreachable ();
32886 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
32887 return addr;
32890 void
32891 rs6000_fatal_bad_address (rtx op)
32893 fatal_insn ("bad address", op);
32896 #if TARGET_MACHO
32898 typedef struct branch_island_d {
32899 tree function_name;
32900 tree label_name;
32901 int line_number;
32902 } branch_island;
32905 static vec<branch_island, va_gc> *branch_islands;
32907 /* Remember to generate a branch island for far calls to the given
32908 function. */
32910 static void
32911 add_compiler_branch_island (tree label_name, tree function_name,
32912 int line_number)
32914 branch_island bi = {function_name, label_name, line_number};
32915 vec_safe_push (branch_islands, bi);
32918 /* Generate far-jump branch islands for everything recorded in
32919 branch_islands. Invoked immediately after the last instruction of
32920 the epilogue has been emitted; the branch islands must be appended
32921 to, and contiguous with, the function body. Mach-O stubs are
32922 generated in machopic_output_stub(). */
32924 static void
32925 macho_branch_islands (void)
32927 char tmp_buf[512];
32929 while (!vec_safe_is_empty (branch_islands))
32931 branch_island *bi = &branch_islands->last ();
32932 const char *label = IDENTIFIER_POINTER (bi->label_name);
32933 const char *name = IDENTIFIER_POINTER (bi->function_name);
32934 char name_buf[512];
32935 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32936 if (name[0] == '*' || name[0] == '&')
32937 strcpy (name_buf, name+1);
32938 else
32940 name_buf[0] = '_';
32941 strcpy (name_buf+1, name);
32943 strcpy (tmp_buf, "\n");
32944 strcat (tmp_buf, label);
32945 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32946 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32947 dbxout_stabd (N_SLINE, bi->line_number);
32948 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32949 if (flag_pic)
32951 if (TARGET_LINK_STACK)
32953 char name[32];
32954 get_ppc476_thunk_name (name);
32955 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32956 strcat (tmp_buf, name);
32957 strcat (tmp_buf, "\n");
32958 strcat (tmp_buf, label);
32959 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32961 else
32963 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32964 strcat (tmp_buf, label);
32965 strcat (tmp_buf, "_pic\n");
32966 strcat (tmp_buf, label);
32967 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32970 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32971 strcat (tmp_buf, name_buf);
32972 strcat (tmp_buf, " - ");
32973 strcat (tmp_buf, label);
32974 strcat (tmp_buf, "_pic)\n");
32976 strcat (tmp_buf, "\tmtlr r0\n");
32978 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32979 strcat (tmp_buf, name_buf);
32980 strcat (tmp_buf, " - ");
32981 strcat (tmp_buf, label);
32982 strcat (tmp_buf, "_pic)\n");
32984 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32986 else
32988 strcat (tmp_buf, ":\nlis r12,hi16(");
32989 strcat (tmp_buf, name_buf);
32990 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32991 strcat (tmp_buf, name_buf);
32992 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32994 output_asm_insn (tmp_buf, 0);
32995 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32996 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32997 dbxout_stabd (N_SLINE, bi->line_number);
32998 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32999 branch_islands->pop ();
33003 /* NO_PREVIOUS_DEF checks the branch island list to see whether the
33004 function name is already there or not. */
33006 static int
33007 no_previous_def (tree function_name)
33009 branch_island *bi;
33010 unsigned ix;
33012 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33013 if (function_name == bi->function_name)
33014 return 0;
33015 return 1;
33018 /* GET_PREV_LABEL gets the label name from the previous definition of
33019 the function. */
33021 static tree
33022 get_prev_label (tree function_name)
33024 branch_island *bi;
33025 unsigned ix;
33027 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33028 if (function_name == bi->function_name)
33029 return bi->label_name;
33030 return NULL_TREE;
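/* Editorial note: a minimal sketch of the linear lookup shared by
   no_previous_def () and get_prev_label () above, over a plain array
   instead of GCC's vec type; the names below are illustrative.  Both
   functions compare IDENTIFIER nodes by pointer, which is valid because
   get_identifier () interns them.  */
struct island_entry { const void *function_name; const void *label_name; };

static const void *
find_island_label (const struct island_entry *islands, unsigned n,
                   const void *function_name)
{
  for (unsigned ix = 0; ix < n; ix++)
    if (function_name == islands[ix].function_name)
      return islands[ix].label_name;
  return 0;   /* No previous definition recorded.  */
}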
33033 /* INSN is either a function call or a millicode call. It may have an
33034 unconditional jump in its delay slot.
33036 CALL_DEST is the routine we are calling. */
33038 char *
33039 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
33040 int cookie_operand_number)
33042 static char buf[256];
33043 if (darwin_emit_branch_islands
33044 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
33045 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
33047 tree labelname;
33048 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
33050 if (no_previous_def (funname))
33052 rtx label_rtx = gen_label_rtx ();
33053 char *label_buf, temp_buf[256];
33054 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
33055 CODE_LABEL_NUMBER (label_rtx));
33056 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
33057 labelname = get_identifier (label_buf);
33058 add_compiler_branch_island (labelname, funname, insn_line (insn));
33060 else
33061 labelname = get_prev_label (funname);
33063 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
33064 instruction will reach 'foo', otherwise link as 'bl L42'".
33065 "L42" should be a 'branch island', that will do a far jump to
33066 'foo'. Branch islands are generated in
33067 macho_branch_islands(). */
33068 sprintf (buf, "jbsr %%z%d,%.246s",
33069 dest_operand_number, IDENTIFIER_POINTER (labelname));
33071 else
33072 sprintf (buf, "bl %%z%d", dest_operand_number);
33073 return buf;
33076 /* Generate PIC and indirect symbol stubs. */
33078 void
33079 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33081 unsigned int length;
33082 char *symbol_name, *lazy_ptr_name;
33083 char *local_label_0;
33084 static int label = 0;
33086 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33087 symb = (*targetm.strip_name_encoding) (symb);
33090 length = strlen (symb);
33091 symbol_name = XALLOCAVEC (char, length + 32);
33092 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33094 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33095 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33097 if (flag_pic == 2)
33098 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33099 else
33100 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33102 if (flag_pic == 2)
33104 fprintf (file, "\t.align 5\n");
33106 fprintf (file, "%s:\n", stub);
33107 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33109 label++;
33110 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33111 sprintf (local_label_0, "\"L%011d$spb\"", label);
33113 fprintf (file, "\tmflr r0\n");
33114 if (TARGET_LINK_STACK)
33116 char name[32];
33117 get_ppc476_thunk_name (name);
33118 fprintf (file, "\tbl %s\n", name);
33119 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33121 else
33123 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33124 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33126 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33127 lazy_ptr_name, local_label_0);
33128 fprintf (file, "\tmtlr r0\n");
33129 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33130 (TARGET_64BIT ? "ldu" : "lwzu"),
33131 lazy_ptr_name, local_label_0);
33132 fprintf (file, "\tmtctr r12\n");
33133 fprintf (file, "\tbctr\n");
33135 else
33137 fprintf (file, "\t.align 4\n");
33139 fprintf (file, "%s:\n", stub);
33140 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33142 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33143 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33144 (TARGET_64BIT ? "ldu" : "lwzu"),
33145 lazy_ptr_name);
33146 fprintf (file, "\tmtctr r12\n");
33147 fprintf (file, "\tbctr\n");
33150 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33151 fprintf (file, "%s:\n", lazy_ptr_name);
33152 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33153 fprintf (file, "%sdyld_stub_binding_helper\n",
33154 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33157 /* Legitimize PIC addresses. If the address is already
33158 position-independent, we return ORIG. Newly generated
33159 position-independent addresses go into a reg. This is REG if
33160 nonzero, otherwise we allocate register(s) as necessary. */
33162 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
33164 rtx
33165 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33166 rtx reg)
33168 rtx base, offset;
33170 if (reg == NULL && !reload_completed)
33171 reg = gen_reg_rtx (Pmode);
33173 if (GET_CODE (orig) == CONST)
33175 rtx reg_temp;
33177 if (GET_CODE (XEXP (orig, 0)) == PLUS
33178 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33179 return orig;
33181 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33183 /* Use a different reg for the intermediate value, as
33184 it will be marked UNCHANGING. */
33185 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33186 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33187 Pmode, reg_temp);
33188 offset =
33189 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33190 Pmode, reg);
33192 if (GET_CODE (offset) == CONST_INT)
33194 if (SMALL_INT (offset))
33195 return plus_constant (Pmode, base, INTVAL (offset));
33196 else if (!reload_completed)
33197 offset = force_reg (Pmode, offset);
33198 else
33200 rtx mem = force_const_mem (Pmode, orig);
33201 return machopic_legitimize_pic_address (mem, Pmode, reg);
33204 return gen_rtx_PLUS (Pmode, base, offset);
33207 /* Fall back on generic machopic code. */
33208 return machopic_legitimize_pic_address (orig, mode, reg);
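/* Editorial note: a self-contained sketch of the SMALL_INT test defined
   above, assuming 64-bit arithmetic.  Adding 0x8000 shifts the signed
   16-bit range [-0x8000, 0x7fff] onto [0, 0xffff], so a single unsigned
   compare decides whether an offset fits a 16-bit immediate; the
   function name is illustrative.  */
#include <stdint.h>

static int
fits_signed_16 (uint64_t x)
{
  return (x + 0x8000) < 0x10000;
}
/* fits_signed_16 (0x7fff) and fits_signed_16 ((uint64_t) -0x8000) are 1;
   fits_signed_16 (0x8000) is 0.  */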
33211 /* Output a .machine directive for the Darwin assembler, and call
33212 the generic start_file routine. */
33214 static void
33215 rs6000_darwin_file_start (void)
33217 static const struct
33219 const char *arg;
33220 const char *name;
33221 HOST_WIDE_INT if_set;
33222 } mapping[] = {
33223 { "ppc64", "ppc64", MASK_64BIT },
33224 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33225 { "power4", "ppc970", 0 },
33226 { "G5", "ppc970", 0 },
33227 { "7450", "ppc7450", 0 },
33228 { "7400", "ppc7400", MASK_ALTIVEC },
33229 { "G4", "ppc7400", 0 },
33230 { "750", "ppc750", 0 },
33231 { "740", "ppc750", 0 },
33232 { "G3", "ppc750", 0 },
33233 { "604e", "ppc604e", 0 },
33234 { "604", "ppc604", 0 },
33235 { "603e", "ppc603", 0 },
33236 { "603", "ppc603", 0 },
33237 { "601", "ppc601", 0 },
33238 { NULL, "ppc", 0 } };
33239 const char *cpu_id = "";
33240 size_t i;
33242 rs6000_file_start ();
33243 darwin_file_start ();
33245 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33247 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33248 cpu_id = rs6000_default_cpu;
33250 if (global_options_set.x_rs6000_cpu_index)
33251 cpu_id = processor_target_table[rs6000_cpu_index].name;
33253 /* Look through the mapping array. Pick the first name that either
33254 matches the argument, has a bit set in IF_SET that is also set
33255 in the target flags, or has a NULL name. */
33257 i = 0;
33258 while (mapping[i].arg != NULL
33259 && strcmp (mapping[i].arg, cpu_id) != 0
33260 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33261 i++;
33263 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33266 #endif /* TARGET_MACHO */
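/* Editorial note: a standalone sketch of the table scan performed in
   rs6000_darwin_file_start () above.  The terminating { NULL, "ppc", 0 }
   row guarantees the loop halts, so some .machine directive is always
   emitted; the types and names below are illustrative.  */
#include <stddef.h>
#include <string.h>

struct cpu_map { const char *arg; const char *name; unsigned long if_set; };

static const char *
pick_machine (const struct cpu_map *map, const char *cpu_id,
              unsigned long isa_flags)
{
  size_t i = 0;
  while (map[i].arg != NULL
         && strcmp (map[i].arg, cpu_id) != 0
         && (map[i].if_set & isa_flags) == 0)
    i++;
  return map[i].name;   /* Falls back to the NULL-terminated default.  */
}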
33268 #if TARGET_ELF
33269 static int
33270 rs6000_elf_reloc_rw_mask (void)
33272 if (flag_pic)
33273 return 3;
33274 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33275 return 2;
33276 else
33277 return 0;
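/* Editorial note: the value returned above is the bit mask documented
   for TARGET_ASM_RELOC_RW_MASK; bit 0 forces sections containing local
   relocations into read-write memory and bit 1 does the same for global
   relocations.  Hence PIC (3) keeps all relocations out of read-only
   sections, while the AIX and ELFv2 ABIs (2) demote only sections with
   global relocations.  */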
33280 /* Record an element in the table of global constructors. SYMBOL is
33281 a SYMBOL_REF of the function to be called; PRIORITY is a number
33282 between 0 and MAX_INIT_PRIORITY.
33284 This differs from default_named_section_asm_out_constructor in
33285 that we have special handling for -mrelocatable. */
33287 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33288 static void
33289 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33291 const char *section = ".ctors";
33292 char buf[18];
33294 if (priority != DEFAULT_INIT_PRIORITY)
33296 sprintf (buf, ".ctors.%.5u",
33297 /* Invert the numbering so the linker puts us in the proper
33298 order; constructors are run from right to left, and the
33299 linker sorts in increasing order. */
33300 MAX_INIT_PRIORITY - priority);
33301 section = buf;
33304 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33305 assemble_align (POINTER_SIZE);
33307 if (DEFAULT_ABI == ABI_V4
33308 && (TARGET_RELOCATABLE || flag_pic > 1))
33310 fputs ("\t.long (", asm_out_file);
33311 output_addr_const (asm_out_file, symbol);
33312 fputs (")@fixup\n", asm_out_file);
33314 else
33315 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33318 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33319 static void
33320 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33322 const char *section = ".dtors";
33323 char buf[18];
33325 if (priority != DEFAULT_INIT_PRIORITY)
33327 sprintf (buf, ".dtors.%.5u",
33328 /* Invert the numbering so the linker puts us in the proper
33329 order; destructors are run from left to right, and the
33330 linker sorts in increasing order. */
33331 MAX_INIT_PRIORITY - priority);
33332 section = buf;
33335 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33336 assemble_align (POINTER_SIZE);
33338 if (DEFAULT_ABI == ABI_V4
33339 && (TARGET_RELOCATABLE || flag_pic > 1))
33341 fputs ("\t.long (", asm_out_file);
33342 output_addr_const (asm_out_file, symbol);
33343 fputs (")@fixup\n", asm_out_file);
33345 else
33346 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
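/* Editorial note: a small worked example of the priority inversion used
   by both emitters above, assuming MAX_INIT_PRIORITY == 65535 as in
   GCC's tree.h.  init_priority 101 yields ".ctors.65434" and 65533
   yields ".ctors.00002"; the linker sorts the names in increasing order
   and .ctors entries run right to left, so smaller priority numbers are
   constructed first.  The macro name below is illustrative.  */
#include <stdio.h>

#define EXAMPLE_MAX_INIT_PRIORITY 65535

static void
ctor_section_name (char buf[18], int priority)
{
  sprintf (buf, ".ctors.%.5u", EXAMPLE_MAX_INIT_PRIORITY - priority);
}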
33349 void
33350 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33352 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33354 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33355 ASM_OUTPUT_LABEL (file, name);
33356 fputs (DOUBLE_INT_ASM_OP, file);
33357 rs6000_output_function_entry (file, name);
33358 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33359 if (DOT_SYMBOLS)
33361 fputs ("\t.size\t", file);
33362 assemble_name (file, name);
33363 fputs (",24\n\t.type\t.", file);
33364 assemble_name (file, name);
33365 fputs (",@function\n", file);
33366 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33368 fputs ("\t.globl\t.", file);
33369 assemble_name (file, name);
33370 putc ('\n', file);
33373 else
33374 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33375 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33376 rs6000_output_function_entry (file, name);
33377 fputs (":\n", file);
33378 return;
33381 int uses_toc;
33382 if (DEFAULT_ABI == ABI_V4
33383 && (TARGET_RELOCATABLE || flag_pic > 1)
33384 && !TARGET_SECURE_PLT
33385 && (!constant_pool_empty_p () || crtl->profile)
33386 && (uses_toc = uses_TOC ()))
33388 char buf[256];
33390 if (uses_toc == 2)
33391 switch_to_other_text_partition ();
33392 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33394 fprintf (file, "\t.long ");
33395 assemble_name (file, toc_label_name);
33396 need_toc_init = 1;
33397 putc ('-', file);
33398 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33399 assemble_name (file, buf);
33400 putc ('\n', file);
33401 if (uses_toc == 2)
33402 switch_to_other_text_partition ();
33405 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33406 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33408 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33410 char buf[256];
33412 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33414 fprintf (file, "\t.quad .TOC.-");
33415 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33416 assemble_name (file, buf);
33417 putc ('\n', file);
33420 if (DEFAULT_ABI == ABI_AIX)
33422 const char *desc_name, *orig_name;
33424 orig_name = (*targetm.strip_name_encoding) (name);
33425 desc_name = orig_name;
33426 while (*desc_name == '.')
33427 desc_name++;
33429 if (TREE_PUBLIC (decl))
33430 fprintf (file, "\t.globl %s\n", desc_name);
33432 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33433 fprintf (file, "%s:\n", desc_name);
33434 fprintf (file, "\t.long %s\n", orig_name);
33435 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33436 fputs ("\t.long 0\n", file);
33437 fprintf (file, "\t.previous\n");
33439 ASM_OUTPUT_LABEL (file, name);
33442 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33443 static void
33444 rs6000_elf_file_end (void)
33446 #ifdef HAVE_AS_GNU_ATTRIBUTE
33447 /* ??? The value emitted depends on options active at file end.
33448 Assume anyone using #pragma or attributes that might change
33449 options knows what they are doing. */
33450 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33451 && rs6000_passes_float)
33453 int fp;
33455 if (TARGET_DF_FPR)
33456 fp = 1;
33457 else if (TARGET_SF_FPR)
33458 fp = 3;
33459 else
33460 fp = 2;
33461 if (rs6000_passes_long_double)
33463 if (!TARGET_LONG_DOUBLE_128)
33464 fp |= 2 * 4;
33465 else if (TARGET_IEEEQUAD)
33466 fp |= 3 * 4;
33467 else
33468 fp |= 1 * 4;
33470 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33472 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33474 if (rs6000_passes_vector)
33475 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33476 (TARGET_ALTIVEC_ABI ? 2 : 1));
33477 if (rs6000_returns_struct)
33478 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33479 aix_struct_return ? 2 : 1);
33481 #endif
33482 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33483 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33484 file_end_indicate_exec_stack ();
33485 #endif
33487 if (flag_split_stack)
33488 file_end_indicate_split_stack ();
33490 if (cpu_builtin_p)
33492 /* We have expanded a CPU builtin, so we need to emit a reference to
33493 the special symbol that LIBC uses to declare it supports the
33494 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33495 switch_to_section (data_section);
33496 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33497 fprintf (asm_out_file, "\t%s %s\n",
33498 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33501 #endif
33503 #if TARGET_XCOFF
33505 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33506 #define HAVE_XCOFF_DWARF_EXTRAS 0
33507 #endif
33509 static enum unwind_info_type
33510 rs6000_xcoff_debug_unwind_info (void)
33512 return UI_NONE;
33515 static void
33516 rs6000_xcoff_asm_output_anchor (rtx symbol)
33518 char buffer[100];
33520 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33521 SYMBOL_REF_BLOCK_OFFSET (symbol));
33522 fprintf (asm_out_file, "%s", SET_ASM_OP);
33523 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33524 fprintf (asm_out_file, ",");
33525 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33526 fprintf (asm_out_file, "\n");
33529 static void
33530 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33532 fputs (GLOBAL_ASM_OP, stream);
33533 RS6000_OUTPUT_BASENAME (stream, name);
33534 putc ('\n', stream);
33537 /* A get_unnamed_decl callback, used for read-only sections. PTR
33538 points to the section string variable. */
33540 static void
33541 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33543 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33544 *(const char *const *) directive,
33545 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33548 /* Likewise for read-write sections. */
33550 static void
33551 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33553 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33554 *(const char *const *) directive,
33555 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33558 static void
33559 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33561 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33562 *(const char *const *) directive,
33563 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33566 /* A get_unnamed_section callback, used for switching to toc_section. */
33568 static void
33569 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33571 if (TARGET_MINIMAL_TOC)
33573 /* toc_section is always selected at least once from
33574 rs6000_xcoff_file_start, so this is guaranteed to
33575 be defined exactly once in each file. */
33576 if (!toc_initialized)
33578 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33579 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33580 toc_initialized = 1;
33582 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33583 (TARGET_32BIT ? "" : ",3"));
33585 else
33586 fputs ("\t.toc\n", asm_out_file);
33589 /* Implement TARGET_ASM_INIT_SECTIONS. */
33591 static void
33592 rs6000_xcoff_asm_init_sections (void)
33594 read_only_data_section
33595 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33596 &xcoff_read_only_section_name);
33598 private_data_section
33599 = get_unnamed_section (SECTION_WRITE,
33600 rs6000_xcoff_output_readwrite_section_asm_op,
33601 &xcoff_private_data_section_name);
33603 tls_data_section
33604 = get_unnamed_section (SECTION_TLS,
33605 rs6000_xcoff_output_tls_section_asm_op,
33606 &xcoff_tls_data_section_name);
33608 tls_private_data_section
33609 = get_unnamed_section (SECTION_TLS,
33610 rs6000_xcoff_output_tls_section_asm_op,
33611 &xcoff_private_data_section_name);
33613 read_only_private_data_section
33614 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33615 &xcoff_private_data_section_name);
33617 toc_section
33618 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33620 readonly_data_section = read_only_data_section;
33623 static int
33624 rs6000_xcoff_reloc_rw_mask (void)
33626 return 3;
33629 static void
33630 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33631 tree decl ATTRIBUTE_UNUSED)
33633 int smclass;
33634 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33636 if (flags & SECTION_EXCLUDE)
33637 smclass = 4;
33638 else if (flags & SECTION_DEBUG)
33640 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33641 return;
33643 else if (flags & SECTION_CODE)
33644 smclass = 0;
33645 else if (flags & SECTION_TLS)
33646 smclass = 3;
33647 else if (flags & SECTION_WRITE)
33648 smclass = 2;
33649 else
33650 smclass = 1;
33652 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33653 (flags & SECTION_CODE) ? "." : "",
33654 name, suffix[smclass], flags & SECTION_ENTSIZE);
33657 #define IN_NAMED_SECTION(DECL) \
33658 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33659 && DECL_SECTION_NAME (DECL) != NULL)
33661 static section *
33662 rs6000_xcoff_select_section (tree decl, int reloc,
33663 unsigned HOST_WIDE_INT align)
33665 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33666 named section. */
33667 if (align > BIGGEST_ALIGNMENT)
33669 resolve_unique_section (decl, reloc, true);
33670 if (IN_NAMED_SECTION (decl))
33671 return get_named_section (decl, NULL, reloc);
33674 if (decl_readonly_section (decl, reloc))
33676 if (TREE_PUBLIC (decl))
33677 return read_only_data_section;
33678 else
33679 return read_only_private_data_section;
33681 else
33683 #if HAVE_AS_TLS
33684 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33686 if (TREE_PUBLIC (decl))
33687 return tls_data_section;
33688 else if (bss_initializer_p (decl))
33690 /* Convert to COMMON to emit in BSS. */
33691 DECL_COMMON (decl) = 1;
33692 return tls_comm_section;
33694 else
33695 return tls_private_data_section;
33697 else
33698 #endif
33699 if (TREE_PUBLIC (decl))
33700 return data_section;
33701 else
33702 return private_data_section;
33706 static void
33707 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33709 const char *name;
33711 /* Use select_section for private data and uninitialized data with
33712 alignment <= BIGGEST_ALIGNMENT. */
33713 if (!TREE_PUBLIC (decl)
33714 || DECL_COMMON (decl)
33715 || (DECL_INITIAL (decl) == NULL_TREE
33716 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33717 || DECL_INITIAL (decl) == error_mark_node
33718 || (flag_zero_initialized_in_bss
33719 && initializer_zerop (DECL_INITIAL (decl))))
33720 return;
33722 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33723 name = (*targetm.strip_name_encoding) (name);
33724 set_decl_section_name (decl, name);
33727 /* Select section for constant in constant pool.
33729 On RS/6000, all constants are in the private read-only data area.
33730 However, if this is being placed in the TOC it must be output as a
33731 toc entry. */
33733 static section *
33734 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33735 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33737 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33738 return toc_section;
33739 else
33740 return read_only_private_data_section;
33743 /* Remove any trailing [DS] or the like from the symbol name. */
33745 static const char *
33746 rs6000_xcoff_strip_name_encoding (const char *name)
33748 size_t len;
33749 if (*name == '*')
33750 name++;
33751 len = strlen (name);
33752 if (name[len - 1] == ']')
33753 return ggc_alloc_string (name, len - 4);
33754 else
33755 return name;
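/* Editorial note: a standalone sketch of the suffix stripping above.
   XCOFF mapping classes are two letters, so "foo[DS]" drops its last
   four characters to give "foo".  This version adds a length guard and
   uses malloc () in place of GCC's GC string allocator; the function
   name is illustrative.  */
#include <stdlib.h>
#include <string.h>

static char *
strip_mapping_class (const char *name)
{
  size_t len = strlen (name);
  if (len >= 4 && name[len - 1] == ']')
    {
      char *copy = malloc (len - 3);   /* len - 4 chars plus a NUL.  */
      memcpy (copy, name, len - 4);
      copy[len - 4] = '\0';
      return copy;
    }
  return strdup (name);
}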
33758 /* Section attributes. AIX is always PIC. */
33760 static unsigned int
33761 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33763 unsigned int align;
33764 unsigned int flags = default_section_type_flags (decl, name, reloc);
33766 /* Align to at least UNIT size. */
33767 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33768 align = MIN_UNITS_PER_WORD;
33769 else
33770 /* Increase alignment of large objects if not already stricter. */
33771 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33772 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33773 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33775 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
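/* Editorial note: the expression above packs the alignment's base-two
   logarithm into the low SECTION_ENTSIZE bits of the section flags, so
   e.g. an 8-byte alignment is encoded as 3 and a 16-byte alignment as 4.
   A minimal exact_log2 over powers of two, for reference:  */
static int
exact_log2_example (unsigned int x)
{
  int n = 0;
  if (x == 0 || (x & (x - 1)) != 0)
    return -1;            /* Not an exact power of two.  */
  while (x >>= 1)
    n++;
  return n;
}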
33778 /* Output at beginning of assembler file.
33780 Initialize the section names for the RS/6000 at this point.
33782 Specify filename, including full path, to assembler.
33784 We want to go into the TOC section so at least one .toc will be emitted.
33785 Also, in order to output proper .bs/.es pairs, we need at least one static
33786 [RW] section emitted.
33788 Finally, declare mcount when profiling to make the assembler happy. */
33790 static void
33791 rs6000_xcoff_file_start (void)
33793 rs6000_gen_section_name (&xcoff_bss_section_name,
33794 main_input_filename, ".bss_");
33795 rs6000_gen_section_name (&xcoff_private_data_section_name,
33796 main_input_filename, ".rw_");
33797 rs6000_gen_section_name (&xcoff_read_only_section_name,
33798 main_input_filename, ".ro_");
33799 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33800 main_input_filename, ".tls_");
33801 rs6000_gen_section_name (&xcoff_tbss_section_name,
33802 main_input_filename, ".tbss_[UL]");
33804 fputs ("\t.file\t", asm_out_file);
33805 output_quoted_string (asm_out_file, main_input_filename);
33806 fputc ('\n', asm_out_file);
33807 if (write_symbols != NO_DEBUG)
33808 switch_to_section (private_data_section);
33809 switch_to_section (toc_section);
33810 switch_to_section (text_section);
33811 if (profile_flag)
33812 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33813 rs6000_file_start ();
33816 /* Output at end of assembler file.
33817 On the RS/6000, referencing data should automatically pull in text. */
33819 static void
33820 rs6000_xcoff_file_end (void)
33822 switch_to_section (text_section);
33823 fputs ("_section_.text:\n", asm_out_file);
33824 switch_to_section (data_section);
33825 fputs (TARGET_32BIT
33826 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33827 asm_out_file);
33830 struct declare_alias_data
33832 FILE *file;
33833 bool function_descriptor;
33836 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
33838 static bool
33839 rs6000_declare_alias (struct symtab_node *n, void *d)
33841 struct declare_alias_data *data = (struct declare_alias_data *)d;
33842 /* Main symbol is output specially, because varasm machinery does part of
33843 the job for us - we do not need to declare .globl/lglobs and such. */
33844 if (!n->alias || n->weakref)
33845 return false;
33847 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33848 return false;
33850 /* Prevent assemble_alias from trying to use .set pseudo operation
33851 that does not behave as expected by the middle-end. */
33852 TREE_ASM_WRITTEN (n->decl) = true;
33854 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33855 char *buffer = (char *) alloca (strlen (name) + 2);
33856 char *p;
33857 int dollar_inside = 0;
33859 strcpy (buffer, name);
33860 p = strchr (buffer, '$');
33861 while (p) {
33862 *p = '_';
33863 dollar_inside++;
33864 p = strchr (p + 1, '$');
33866 if (TREE_PUBLIC (n->decl))
33868 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33870 if (dollar_inside) {
33871 if (data->function_descriptor)
33872 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33873 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33875 if (data->function_descriptor)
33877 fputs ("\t.globl .", data->file);
33878 RS6000_OUTPUT_BASENAME (data->file, buffer);
33879 putc ('\n', data->file);
33881 fputs ("\t.globl ", data->file);
33882 RS6000_OUTPUT_BASENAME (data->file, buffer);
33883 putc ('\n', data->file);
33885 #ifdef ASM_WEAKEN_DECL
33886 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33887 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33888 #endif
33890 else
33892 if (dollar_inside)
33894 if (data->function_descriptor)
33895 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33896 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33898 if (data->function_descriptor)
33900 fputs ("\t.lglobl .", data->file);
33901 RS6000_OUTPUT_BASENAME (data->file, buffer);
33902 putc ('\n', data->file);
33904 fputs ("\t.lglobl ", data->file);
33905 RS6000_OUTPUT_BASENAME (data->file, buffer);
33906 putc ('\n', data->file);
33908 if (data->function_descriptor)
33909 fputs (".", data->file);
33910 RS6000_OUTPUT_BASENAME (data->file, buffer);
33911 fputs (":\n", data->file);
33912 return false;
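/* Editorial note: a standalone sketch of the '$' to '_' rewrite used in
   rs6000_declare_alias () above and repeated in
   rs6000_xcoff_declare_function_name () below.  It returns the number of
   characters replaced, matching the dollar_inside counter that decides
   whether a .rename back to the original name must be emitted.  */
#include <string.h>

static int
replace_dollars (char *buffer)
{
  int count = 0;
  char *p;
  for (p = strchr (buffer, '$'); p; p = strchr (p + 1, '$'))
    {
      *p = '_';
      count++;
    }
  return count;
}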
33916 #ifdef HAVE_GAS_HIDDEN
33917 /* Helper function to calculate visibility of a DECL
33918 and return the value as a const string. */
33920 static const char *
33921 rs6000_xcoff_visibility (tree decl)
33923 static const char * const visibility_types[] = {
33924 "", ",protected", ",hidden", ",internal"
33927 enum symbol_visibility vis = DECL_VISIBILITY (decl);
33929 if (TREE_CODE (decl) == FUNCTION_DECL
33930 && cgraph_node::get (decl)
33931 && cgraph_node::get (decl)->instrumentation_clone
33932 && cgraph_node::get (decl)->instrumented_version)
33933 vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);
33935 return visibility_types[vis];
33937 #endif
33940 /* This macro produces the initial definition of a function name.
33941 On the RS/6000, we need to place an extra '.' in the function name and
33942 output the function descriptor.
33943 Dollar signs are converted to underscores.
33945 The csect for the function will have already been created when
33946 text_section was selected. We do have to go back to that csect, however.
33948 The third and fourth parameters to the .function pseudo-op (16 and 044)
33949 are placeholders which no longer have any use.
33951 Because AIX assembler's .set command has unexpected semantics, we output
33952 all aliases as alternative labels in front of the definition. */
33954 void
33955 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33957 char *buffer = (char *) alloca (strlen (name) + 1);
33958 char *p;
33959 int dollar_inside = 0;
33960 struct declare_alias_data data = {file, false};
33962 strcpy (buffer, name);
33963 p = strchr (buffer, '$');
33964 while (p) {
33965 *p = '_';
33966 dollar_inside++;
33967 p = strchr (p + 1, '$');
33969 if (TREE_PUBLIC (decl))
33971 if (!RS6000_WEAK || !DECL_WEAK (decl))
33973 if (dollar_inside) {
33974 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33975 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33977 fputs ("\t.globl .", file);
33978 RS6000_OUTPUT_BASENAME (file, buffer);
33979 #ifdef HAVE_GAS_HIDDEN
33980 fputs (rs6000_xcoff_visibility (decl), file);
33981 #endif
33982 putc ('\n', file);
33985 else
33987 if (dollar_inside) {
33988 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33989 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33991 fputs ("\t.lglobl .", file);
33992 RS6000_OUTPUT_BASENAME (file, buffer);
33993 putc ('\n', file);
33995 fputs ("\t.csect ", file);
33996 RS6000_OUTPUT_BASENAME (file, buffer);
33997 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33998 RS6000_OUTPUT_BASENAME (file, buffer);
33999 fputs (":\n", file);
34000 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34001 &data, true);
34002 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34003 RS6000_OUTPUT_BASENAME (file, buffer);
34004 fputs (", TOC[tc0], 0\n", file);
34005 in_section = NULL;
34006 switch_to_section (function_section (decl));
34007 putc ('.', file);
34008 RS6000_OUTPUT_BASENAME (file, buffer);
34009 fputs (":\n", file);
34010 data.function_descriptor = true;
34011 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34012 &data, true);
34013 if (!DECL_IGNORED_P (decl))
34015 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34016 xcoffout_declare_function (file, decl, buffer);
34017 else if (write_symbols == DWARF2_DEBUG)
34019 name = (*targetm.strip_name_encoding) (name);
34020 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34023 return;
34027 /* Output assembly language to globalize a symbol from a DECL,
34028 possibly with visibility. */
34030 void
34031 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34033 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34034 fputs (GLOBAL_ASM_OP, stream);
34035 RS6000_OUTPUT_BASENAME (stream, name);
34036 #ifdef HAVE_GAS_HIDDEN
34037 fputs (rs6000_xcoff_visibility (decl), stream);
34038 #endif
34039 putc ('\n', stream);
34042 /* Output assembly language to define a symbol as COMMON from a DECL,
34043 possibly with visibility. */
34045 void
34046 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34047 tree decl ATTRIBUTE_UNUSED,
34048 const char *name,
34049 unsigned HOST_WIDE_INT size,
34050 unsigned HOST_WIDE_INT align)
34052 unsigned HOST_WIDE_INT align2 = 2;
34054 if (align > 32)
34055 align2 = floor_log2 (align / BITS_PER_UNIT);
34056 else if (size > 4)
34057 align2 = 3;
34059 fputs (COMMON_ASM_OP, stream);
34060 RS6000_OUTPUT_BASENAME (stream, name);
34062 fprintf (stream,
34063 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34064 size, align2);
34066 #ifdef HAVE_GAS_HIDDEN
34067 fputs (rs6000_xcoff_visibility (decl), stream);
34068 #endif
34069 putc ('\n', stream);
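/* Editorial note: a worked example of the align2 computation above.
   ALIGN arrives in bits, so 128-bit alignment becomes
   floor_log2 (128 / 8) == 4, while small requests fall back to 2
   (4-byte) or, for objects larger than 4 bytes, 3 (8-byte).  A
   standalone version, with BITS_PER_UNIT assumed to be 8:  */
static unsigned int
xcoff_common_align2 (unsigned long size, unsigned long align_bits)
{
  unsigned int align2 = 2;
  if (align_bits > 32)
    {
      unsigned long bytes = align_bits / 8;
      align2 = 0;
      while (bytes >>= 1)
        align2++;          /* floor_log2 of the byte alignment.  */
    }
  else if (size > 4)
    align2 = 3;
  return align2;
}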
34072 /* This macro produces the initial definition of an object (variable) name.
34073 Because AIX assembler's .set command has unexpected semantics, we output
34074 all aliases as alternative labels in front of the definition. */
34076 void
34077 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34079 struct declare_alias_data data = {file, false};
34080 RS6000_OUTPUT_BASENAME (file, name);
34081 fputs (":\n", file);
34082 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34083 &data, true);
34086 /* Override the default 'SYMBOL-.' syntax with the AIX compatible 'SYMBOL-$'. */
34088 void
34089 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34091 fputs (integer_asm_op (size, FALSE), file);
34092 assemble_name (file, label);
34093 fputs ("-$", file);
34096 /* Output a symbol offset relative to the dbase for the current object.
34097 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34098 signed offsets.
34100 __gcc_unwind_dbase is embedded in all executables/libraries through
34101 libgcc/config/rs6000/crtdbase.S. */
34103 void
34104 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34106 fputs (integer_asm_op (size, FALSE), file);
34107 assemble_name (file, label);
34108 fputs("-__gcc_unwind_dbase", file);
34111 #ifdef HAVE_AS_TLS
34112 static void
34113 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34115 rtx symbol;
34116 int flags;
34117 const char *symname;
34119 default_encode_section_info (decl, rtl, first);
34121 /* Careful not to prod global register variables. */
34122 if (!MEM_P (rtl))
34123 return;
34124 symbol = XEXP (rtl, 0);
34125 if (GET_CODE (symbol) != SYMBOL_REF)
34126 return;
34128 flags = SYMBOL_REF_FLAGS (symbol);
34130 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34131 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34133 SYMBOL_REF_FLAGS (symbol) = flags;
34135 /* Append mapping class to extern decls. */
34136 symname = XSTR (symbol, 0);
34137 if (decl /* sync condition with assemble_external () */
34138 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34139 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34140 || TREE_CODE (decl) == FUNCTION_DECL)
34141 && symname[strlen (symname) - 1] != ']')
34143 char *newname = (char *) alloca (strlen (symname) + 5);
34144 strcpy (newname, symname);
34145 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34146 ? "[DS]" : "[UA]"));
34147 XSTR (symbol, 0) = ggc_strdup (newname);
34150 #endif /* HAVE_AS_TLS */
34151 #endif /* TARGET_XCOFF */
34153 void
34154 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34155 const char *name, const char *val)
34157 fputs ("\t.weak\t", stream);
34158 RS6000_OUTPUT_BASENAME (stream, name);
34159 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34160 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34162 if (TARGET_XCOFF)
34163 fputs ("[DS]", stream);
34164 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34165 if (TARGET_XCOFF)
34166 fputs (rs6000_xcoff_visibility (decl), stream);
34167 #endif
34168 fputs ("\n\t.weak\t.", stream);
34169 RS6000_OUTPUT_BASENAME (stream, name);
34171 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34172 if (TARGET_XCOFF)
34173 fputs (rs6000_xcoff_visibility (decl), stream);
34174 #endif
34175 fputc ('\n', stream);
34176 if (val)
34178 #ifdef ASM_OUTPUT_DEF
34179 ASM_OUTPUT_DEF (stream, name, val);
34180 #endif
34181 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34182 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34184 fputs ("\t.set\t.", stream);
34185 RS6000_OUTPUT_BASENAME (stream, name);
34186 fputs (",.", stream);
34187 RS6000_OUTPUT_BASENAME (stream, val);
34188 fputc ('\n', stream);
34194 /* Return true if INSN should not be copied. */
34196 static bool
34197 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34199 return recog_memoized (insn) >= 0
34200 && get_attr_cannot_copy (insn);
34203 /* Compute a (partial) cost for rtx X. Return true if the complete
34204 cost has been computed, and false if subexpressions should be
34205 scanned. In either case, *TOTAL contains the cost result. */
34207 static bool
34208 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34209 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34211 int code = GET_CODE (x);
34213 switch (code)
34215 /* On the RS/6000, if it is valid in the insn, it is free. */
34216 case CONST_INT:
34217 if (((outer_code == SET
34218 || outer_code == PLUS
34219 || outer_code == MINUS)
34220 && (satisfies_constraint_I (x)
34221 || satisfies_constraint_L (x)))
34222 || (outer_code == AND
34223 && (satisfies_constraint_K (x)
34224 || (mode == SImode
34225 ? satisfies_constraint_L (x)
34226 : satisfies_constraint_J (x))))
34227 || ((outer_code == IOR || outer_code == XOR)
34228 && (satisfies_constraint_K (x)
34229 || (mode == SImode
34230 ? satisfies_constraint_L (x)
34231 : satisfies_constraint_J (x))))
34232 || outer_code == ASHIFT
34233 || outer_code == ASHIFTRT
34234 || outer_code == LSHIFTRT
34235 || outer_code == ROTATE
34236 || outer_code == ROTATERT
34237 || outer_code == ZERO_EXTRACT
34238 || (outer_code == MULT
34239 && satisfies_constraint_I (x))
34240 || ((outer_code == DIV || outer_code == UDIV
34241 || outer_code == MOD || outer_code == UMOD)
34242 && exact_log2 (INTVAL (x)) >= 0)
34243 || (outer_code == COMPARE
34244 && (satisfies_constraint_I (x)
34245 || satisfies_constraint_K (x)))
34246 || ((outer_code == EQ || outer_code == NE)
34247 && (satisfies_constraint_I (x)
34248 || satisfies_constraint_K (x)
34249 || (mode == SImode
34250 ? satisfies_constraint_L (x)
34251 : satisfies_constraint_J (x))))
34252 || (outer_code == GTU
34253 && satisfies_constraint_I (x))
34254 || (outer_code == LTU
34255 && satisfies_constraint_P (x)))
34257 *total = 0;
34258 return true;
34260 else if ((outer_code == PLUS
34261 && reg_or_add_cint_operand (x, VOIDmode))
34262 || (outer_code == MINUS
34263 && reg_or_sub_cint_operand (x, VOIDmode))
34264 || ((outer_code == SET
34265 || outer_code == IOR
34266 || outer_code == XOR)
34267 && (INTVAL (x)
34268 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34270 *total = COSTS_N_INSNS (1);
34271 return true;
34273 /* FALLTHRU */
34275 case CONST_DOUBLE:
34276 case CONST_WIDE_INT:
34277 case CONST:
34278 case HIGH:
34279 case SYMBOL_REF:
34280 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34281 return true;
34283 case MEM:
34284 /* When optimizing for size, MEM should be slightly more expensive
34285 than generating the address, e.g., (plus (reg) (const)).
34286 L1 cache latency is about two instructions. */
34287 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34288 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34289 *total += COSTS_N_INSNS (100);
34290 return true;
34292 case LABEL_REF:
34293 *total = 0;
34294 return true;
34296 case PLUS:
34297 case MINUS:
34298 if (FLOAT_MODE_P (mode))
34299 *total = rs6000_cost->fp;
34300 else
34301 *total = COSTS_N_INSNS (1);
34302 return false;
34304 case MULT:
34305 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34306 && satisfies_constraint_I (XEXP (x, 1)))
34308 if (INTVAL (XEXP (x, 1)) >= -256
34309 && INTVAL (XEXP (x, 1)) <= 255)
34310 *total = rs6000_cost->mulsi_const9;
34311 else
34312 *total = rs6000_cost->mulsi_const;
34314 else if (mode == SFmode)
34315 *total = rs6000_cost->fp;
34316 else if (FLOAT_MODE_P (mode))
34317 *total = rs6000_cost->dmul;
34318 else if (mode == DImode)
34319 *total = rs6000_cost->muldi;
34320 else
34321 *total = rs6000_cost->mulsi;
34322 return false;
34324 case FMA:
34325 if (mode == SFmode)
34326 *total = rs6000_cost->fp;
34327 else
34328 *total = rs6000_cost->dmul;
34329 break;
34331 case DIV:
34332 case MOD:
34333 if (FLOAT_MODE_P (mode))
34335 *total = mode == DFmode ? rs6000_cost->ddiv
34336 : rs6000_cost->sdiv;
34337 return false;
34339 /* FALLTHRU */
34341 case UDIV:
34342 case UMOD:
34343 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34344 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34346 if (code == DIV || code == MOD)
34347 /* Shift, addze */
34348 *total = COSTS_N_INSNS (2);
34349 else
34350 /* Shift */
34351 *total = COSTS_N_INSNS (1);
34353 else
34355 if (GET_MODE (XEXP (x, 1)) == DImode)
34356 *total = rs6000_cost->divdi;
34357 else
34358 *total = rs6000_cost->divsi;
34360 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34361 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34362 *total += COSTS_N_INSNS (2);
34363 return false;
34365 case CTZ:
34366 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34367 return false;
34369 case FFS:
34370 *total = COSTS_N_INSNS (4);
34371 return false;
34373 case POPCOUNT:
34374 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34375 return false;
34377 case PARITY:
34378 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34379 return false;
34381 case NOT:
34382 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34383 *total = 0;
34384 else
34385 *total = COSTS_N_INSNS (1);
34386 return false;
34388 case AND:
34389 if (CONST_INT_P (XEXP (x, 1)))
34391 rtx left = XEXP (x, 0);
34392 rtx_code left_code = GET_CODE (left);
34394 /* rotate-and-mask: 1 insn. */
34395 if ((left_code == ROTATE
34396 || left_code == ASHIFT
34397 || left_code == LSHIFTRT)
34398 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34400 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34401 if (!CONST_INT_P (XEXP (left, 1)))
34402 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34403 *total += COSTS_N_INSNS (1);
34404 return true;
34407 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34408 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34409 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34410 || (val & 0xffff) == val
34411 || (val & 0xffff0000) == val
34412 || ((val & 0xffff) == 0 && mode == SImode))
34414 *total = rtx_cost (left, mode, AND, 0, speed);
34415 *total += COSTS_N_INSNS (1);
34416 return true;
34419 /* 2 insns. */
34420 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34422 *total = rtx_cost (left, mode, AND, 0, speed);
34423 *total += COSTS_N_INSNS (2);
34424 return true;
34428 *total = COSTS_N_INSNS (1);
34429 return false;
34431 case IOR:
34432 /* FIXME */
34433 *total = COSTS_N_INSNS (1);
34434 return true;
34436 case CLZ:
34437 case XOR:
34438 case ZERO_EXTRACT:
34439 *total = COSTS_N_INSNS (1);
34440 return false;
34442 case ASHIFT:
34443 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34444 the sign extend and shift separately within the insn. */
34445 if (TARGET_EXTSWSLI && mode == DImode
34446 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34447 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34449 *total = 0;
34450 return false;
34452 /* fall through */
34454 case ASHIFTRT:
34455 case LSHIFTRT:
34456 case ROTATE:
34457 case ROTATERT:
34458 /* Handle mul_highpart. */
34459 if (outer_code == TRUNCATE
34460 && GET_CODE (XEXP (x, 0)) == MULT)
34462 if (mode == DImode)
34463 *total = rs6000_cost->muldi;
34464 else
34465 *total = rs6000_cost->mulsi;
34466 return true;
34468 else if (outer_code == AND)
34469 *total = 0;
34470 else
34471 *total = COSTS_N_INSNS (1);
34472 return false;
34474 case SIGN_EXTEND:
34475 case ZERO_EXTEND:
34476 if (GET_CODE (XEXP (x, 0)) == MEM)
34477 *total = 0;
34478 else
34479 *total = COSTS_N_INSNS (1);
34480 return false;
34482 case COMPARE:
34483 case NEG:
34484 case ABS:
34485 if (!FLOAT_MODE_P (mode))
34487 *total = COSTS_N_INSNS (1);
34488 return false;
34490 /* FALLTHRU */
34492 case FLOAT:
34493 case UNSIGNED_FLOAT:
34494 case FIX:
34495 case UNSIGNED_FIX:
34496 case FLOAT_TRUNCATE:
34497 *total = rs6000_cost->fp;
34498 return false;
34500 case FLOAT_EXTEND:
34501 if (mode == DFmode)
34502 *total = rs6000_cost->sfdf_convert;
34503 else
34504 *total = rs6000_cost->fp;
34505 return false;
34507 case UNSPEC:
34508 switch (XINT (x, 1))
34510 case UNSPEC_FRSP:
34511 *total = rs6000_cost->fp;
34512 return true;
34514 default:
34515 break;
34517 break;
34519 case CALL:
34520 case IF_THEN_ELSE:
34521 if (!speed)
34523 *total = COSTS_N_INSNS (1);
34524 return true;
34526 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34528 *total = rs6000_cost->fp;
34529 return false;
34531 break;
34533 case NE:
34534 case EQ:
34535 case GTU:
34536 case LTU:
34537 /* Carry bit requires mode == Pmode.
34538 NEG or PLUS is already counted, so only add one. */
34539 if (mode == Pmode
34540 && (outer_code == NEG || outer_code == PLUS))
34542 *total = COSTS_N_INSNS (1);
34543 return true;
34545 if (outer_code == SET)
34547 if (XEXP (x, 1) == const0_rtx)
34549 if (TARGET_ISEL && !TARGET_MFCRF)
34550 *total = COSTS_N_INSNS (8);
34551 else
34552 *total = COSTS_N_INSNS (2);
34553 return true;
34555 else
34557 *total = COSTS_N_INSNS (3);
34558 return false;
34561 /* FALLTHRU */
34563 case GT:
34564 case LT:
34565 case UNORDERED:
34566 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
34568 if (TARGET_ISEL && !TARGET_MFCRF)
34569 *total = COSTS_N_INSNS (8);
34570 else
34571 *total = COSTS_N_INSNS (2);
34572 return true;
34574 /* CC COMPARE. */
34575 if (outer_code == COMPARE)
34577 *total = 0;
34578 return true;
34580 break;
34582 default:
34583 break;
34586 return false;
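/* Editorial note: the COSTS_N_INSNS units used throughout
   rs6000_rtx_costs () are relative weights, not cycle counts; GCC's
   rtl.h defines the macro as shown below, so a return of
   COSTS_N_INSNS (2) simply means "twice the cost of one fast
   instruction".  */
#define EXAMPLE_COSTS_N_INSNS(N) ((N) * 4)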
34589 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34591 static bool
34592 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34593 int opno, int *total, bool speed)
34595 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34597 fprintf (stderr,
34598 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34599 "opno = %d, total = %d, speed = %s, x:\n",
34600 ret ? "complete" : "scan inner",
34601 GET_MODE_NAME (mode),
34602 GET_RTX_NAME (outer_code),
34603 opno,
34604 *total,
34605 speed ? "true" : "false");
34607 debug_rtx (x);
34609 return ret;
34612 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34614 static int
34615 rs6000_debug_address_cost (rtx x, machine_mode mode,
34616 addr_space_t as, bool speed)
34618 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34620 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34621 ret, speed ? "true" : "false");
34622 debug_rtx (x);
34624 return ret;
34628 /* A C expression returning the cost of moving data from a register of class
34629 CLASS1 to one of CLASS2. */
34631 static int
34632 rs6000_register_move_cost (machine_mode mode,
34633 reg_class_t from, reg_class_t to)
34635 int ret;
34637 if (TARGET_DEBUG_COST)
34638 dbg_cost_ctrl++;
34640 /* Moves from/to GENERAL_REGS. */
34641 if (reg_classes_intersect_p (to, GENERAL_REGS)
34642 || reg_classes_intersect_p (from, GENERAL_REGS))
34644 reg_class_t rclass = from;
34646 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34647 rclass = to;
34649 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34650 ret = (rs6000_memory_move_cost (mode, rclass, false)
34651 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34653 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34654 shift. */
34655 else if (rclass == CR_REGS)
34656 ret = 4;
34658 /* For those processors that have slow LR/CTR moves, make them more
34659 expensive than memory in order to bias spills to memory. */
34660 else if ((rs6000_cpu == PROCESSOR_POWER6
34661 || rs6000_cpu == PROCESSOR_POWER7
34662 || rs6000_cpu == PROCESSOR_POWER8
34663 || rs6000_cpu == PROCESSOR_POWER9)
34664 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34665 ret = 6 * hard_regno_nregs (0, mode);
34667 else
34668 /* A move will cost one instruction per GPR moved. */
34669 ret = 2 * hard_regno_nregs (0, mode);
34672 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34673 else if (VECTOR_MEM_VSX_P (mode)
34674 && reg_classes_intersect_p (to, VSX_REGS)
34675 && reg_classes_intersect_p (from, VSX_REGS))
34676 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
34678 /* Moving between two similar registers is just one instruction. */
34679 else if (reg_classes_intersect_p (to, from))
34680 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34682 /* Everything else has to go through GENERAL_REGS. */
34683 else
34684 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34685 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34687 if (TARGET_DEBUG_COST)
34689 if (dbg_cost_ctrl == 1)
34690 fprintf (stderr,
34691 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34692 ret, GET_MODE_NAME (mode), reg_class_names[from],
34693 reg_class_names[to]);
34694 dbg_cost_ctrl--;
34697 return ret;
34700 /* A C expression returning the cost of moving data of MODE from a register to
34701 or from memory. */
34703 static int
34704 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34705 bool in ATTRIBUTE_UNUSED)
34707 int ret;
34709 if (TARGET_DEBUG_COST)
34710 dbg_cost_ctrl++;
34712 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34713 ret = 4 * hard_regno_nregs (0, mode);
34714 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34715 || reg_classes_intersect_p (rclass, VSX_REGS)))
34716 ret = 4 * hard_regno_nregs (32, mode);
34717 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34718 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34719 else
34720 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34722 if (TARGET_DEBUG_COST)
34724 if (dbg_cost_ctrl == 1)
34725 fprintf (stderr,
34726 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34727 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34728 dbg_cost_ctrl--;
34731 return ret;
34734 /* Returns a code for a target-specific builtin that implements
34735 the reciprocal of the function, or NULL_TREE if not available. */
34737 static tree
34738 rs6000_builtin_reciprocal (tree fndecl)
34740 switch (DECL_FUNCTION_CODE (fndecl))
34742 case VSX_BUILTIN_XVSQRTDP:
34743 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34744 return NULL_TREE;
34746 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34748 case VSX_BUILTIN_XVSQRTSP:
34749 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34750 return NULL_TREE;
34752 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34754 default:
34755 return NULL_TREE;
34759 /* Load up a constant. If the mode is a vector mode, splat the value across
34760 all of the vector elements. */
34762 static rtx
34763 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34765 rtx reg;
34767 if (mode == SFmode || mode == DFmode)
34769 rtx d = const_double_from_real_value (dconst, mode);
34770 reg = force_reg (mode, d);
34772 else if (mode == V4SFmode)
34774 rtx d = const_double_from_real_value (dconst, SFmode);
34775 rtvec v = gen_rtvec (4, d, d, d, d);
34776 reg = gen_reg_rtx (mode);
34777 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34779 else if (mode == V2DFmode)
34781 rtx d = const_double_from_real_value (dconst, DFmode);
34782 rtvec v = gen_rtvec (2, d, d);
34783 reg = gen_reg_rtx (mode);
34784 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34786 else
34787 gcc_unreachable ();
34789 return reg;
34792 /* Generate an FMA instruction. */
34794 static void
34795 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34797 machine_mode mode = GET_MODE (target);
34798 rtx dst;
34800 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34801 gcc_assert (dst != NULL);
34803 if (dst != target)
34804 emit_move_insn (target, dst);
34807 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34809 static void
34810 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34812 machine_mode mode = GET_MODE (dst);
34813 rtx r;
34815 /* This is a tad more complicated, since the fnma_optab is for
34816 a different expression: fma(-m1, m2, a), which is the same
34817 thing except in the case of signed zeros.
34819 Fortunately we know that if FMA is supported that FNMSUB is
34820 also supported in the ISA. Just expand it directly. */
34822 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34824 r = gen_rtx_NEG (mode, a);
34825 r = gen_rtx_FMA (mode, m1, m2, r);
34826 r = gen_rtx_NEG (mode, r);
34827 emit_insn (gen_rtx_SET (dst, r));
34830 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34831 add a reg_note saying that this was a division. Support both scalar and
34832 vector divide. Assumes no trapping math and finite arguments. */
34834 void
34835 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34837 machine_mode mode = GET_MODE (dst);
34838 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34839 int i;
34841 /* Low precision estimates guarantee 5 bits of accuracy. High
34842 precision estimates guarantee 14 bits of accuracy. SFmode
34843 requires 23 bits of accuracy. DFmode requires 52 bits of
34844 accuracy. Each pass at least doubles the accuracy, leading
34845 to the following. */
34846 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34847 if (mode == DFmode || mode == V2DFmode)
34848 passes++;
34850 enum insn_code code = optab_handler (smul_optab, mode);
34851 insn_gen_fn gen_mul = GEN_FCN (code);
34853 gcc_assert (code != CODE_FOR_nothing);
34855 one = rs6000_load_constant_and_splat (mode, dconst1);
34857 /* x0 = 1./d estimate */
34858 x0 = gen_reg_rtx (mode);
34859 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34860 UNSPEC_FRES)));
34862 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34863 if (passes > 1) {
34865 /* e0 = 1. - d * x0 */
34866 e0 = gen_reg_rtx (mode);
34867 rs6000_emit_nmsub (e0, d, x0, one);
34869 /* x1 = x0 + e0 * x0 */
34870 x1 = gen_reg_rtx (mode);
34871 rs6000_emit_madd (x1, e0, x0, x0);
34873 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34874 ++i, xprev = xnext, eprev = enext) {
34876 /* enext = eprev * eprev */
34877 enext = gen_reg_rtx (mode);
34878 emit_insn (gen_mul (enext, eprev, eprev));
34880 /* xnext = xprev + enext * xprev */
34881 xnext = gen_reg_rtx (mode);
34882 rs6000_emit_madd (xnext, enext, xprev, xprev);
34885 } else
34886 xprev = x0;
34888 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34890 /* u = n * xprev */
34891 u = gen_reg_rtx (mode);
34892 emit_insn (gen_mul (u, n, xprev));
34894 /* v = n - (d * u) */
34895 v = gen_reg_rtx (mode);
34896 rs6000_emit_nmsub (v, d, u, n);
34898 /* dst = (v * xprev) + u */
34899 rs6000_emit_madd (dst, v, xprev, u);
34901 if (note_p)
34902 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
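/* Editor's note: a scalar sketch (not part of GCC) of the recurrence
   expanded above, run on host doubles for the passes > 1 case.
   fres_estimate is a stand-in for the hardware reciprocal-estimate
   instruction (real fres gives far fewer bits).  */
static double
fres_estimate (double d)
{
  return (double) (1.0f / (float) d);   /* crude low-precision seed  */
}

static double
swdiv_reference (double n, double d, int passes)
{
  double x = fres_estimate (d);         /* x0 = 1./d estimate  */
  double e = 1.0 - d * x;               /* e0 = 1. - d * x0  */
  x = e * x + x;                        /* x1 = x0 + e0 * x0  */
  for (int i = 0; i < passes - 2; i++)
    {
      e = e * e;                        /* enext = eprev * eprev  */
      x = e * x + x;                    /* xnext = xprev + enext * xprev  */
    }
  double u = n * x;                     /* u = n * xprev  */
  double v = n - d * u;                 /* v = n - (d * u)  */
  return v * x + u;                     /* dst = (v * xprev) + u  */
}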
34905 /* Goldschmidt's Algorithm for single/double-precision floating point
34906 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34908 void
34909 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34911 machine_mode mode = GET_MODE (src);
34912 rtx e = gen_reg_rtx (mode);
34913 rtx g = gen_reg_rtx (mode);
34914 rtx h = gen_reg_rtx (mode);
34916 /* Low precision estimates guarantee 5 bits of accuracy. High
34917 precision estimates guarantee 14 bits of accuracy. SFmode
34918 requires 23 bits of accuracy. DFmode requires 52 bits of
34919 accuracy. Each pass at least doubles the accuracy, leading
34920 to the following. */
34921 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34922 if (mode == DFmode || mode == V2DFmode)
34923 passes++;
34925 int i;
34926 rtx mhalf;
34927 enum insn_code code = optab_handler (smul_optab, mode);
34928 insn_gen_fn gen_mul = GEN_FCN (code);
34930 gcc_assert (code != CODE_FOR_nothing);
34932 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34934 /* e = rsqrt estimate */
34935 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
34936 UNSPEC_RSQRT)));
34938 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
34939 if (!recip)
34941 rtx zero = force_reg (mode, CONST0_RTX (mode));
34943 if (mode == SFmode)
34945 rtx target = emit_conditional_move (e, GT, src, zero, mode,
34946 e, zero, mode, 0);
34947 if (target != e)
34948 emit_move_insn (e, target);
34950 else
34952 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
34953 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
34957 /* g = sqrt estimate. */
34958 emit_insn (gen_mul (g, e, src));
34959 /* h = 1/(2*sqrt) estimate. */
34960 emit_insn (gen_mul (h, e, mhalf));
34962 if (recip)
34964 if (passes == 1)
34966 rtx t = gen_reg_rtx (mode);
34967 rs6000_emit_nmsub (t, g, h, mhalf);
34968 /* Apply correction directly to 1/rsqrt estimate. */
34969 rs6000_emit_madd (dst, e, t, e);
34971 else
34973 for (i = 0; i < passes; i++)
34975 rtx t1 = gen_reg_rtx (mode);
34976 rtx g1 = gen_reg_rtx (mode);
34977 rtx h1 = gen_reg_rtx (mode);
34979 rs6000_emit_nmsub (t1, g, h, mhalf);
34980 rs6000_emit_madd (g1, g, t1, g);
34981 rs6000_emit_madd (h1, h, t1, h);
34983 g = g1;
34984 h = h1;
34986 /* Multiply by 2 for 1/rsqrt. */
34987 emit_insn (gen_add3_insn (dst, h, h));
34990 else
34992 rtx t = gen_reg_rtx (mode);
34993 rs6000_emit_nmsub (t, g, h, mhalf);
34994 rs6000_emit_madd (dst, g, t, g);
34997 return;
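/* Editor's note: a scalar sketch (not part of GCC) of the Goldschmidt
   iteration expanded above, run on host doubles.  frsqrte_estimate is
   a stand-in for the hardware reciprocal-square-root estimate (real
   frsqrte gives far fewer bits); the src == 0.0 filtering is omitted
   for brevity.  */
static double
frsqrte_estimate (double src)
{
  return (double) (1.0f / __builtin_sqrtf ((float) src));
}

static double
swsqrt_reference (double src, int passes, int recip)
{
  double e = frsqrte_estimate (src);    /* e = rsqrt estimate  */
  double g = e * src;                   /* g = sqrt estimate  */
  double h = e * 0.5;                   /* h = 1/(2*sqrt) estimate  */

  if (recip)
    {
      if (passes == 1)
        {
          double t = 0.5 - g * h;       /* correction term  */
          return e * t + e;             /* refine e directly  */
        }
      for (int i = 0; i < passes; i++)
        {
          double t = 0.5 - g * h;       /* zero once g and h are exact  */
          g = g * t + g;
          h = h * t + h;
        }
      return h + h;                     /* 2*h ~= 1/sqrt (src)  */
    }

  double t = 0.5 - g * h;
  return g * t + g;                     /* ~= sqrt (src)  */
}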
35000 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35001 (Power7) targets. DST is the target, and SRC is the argument operand. */
35003 void
35004 rs6000_emit_popcount (rtx dst, rtx src)
35006 machine_mode mode = GET_MODE (dst);
35007 rtx tmp1, tmp2;
35009 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35010 if (TARGET_POPCNTD)
35012 if (mode == SImode)
35013 emit_insn (gen_popcntdsi2 (dst, src));
35014 else
35015 emit_insn (gen_popcntddi2 (dst, src));
35016 return;
35019 tmp1 = gen_reg_rtx (mode);
35021 if (mode == SImode)
35023 emit_insn (gen_popcntbsi2 (tmp1, src));
35024 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35025 NULL_RTX, 0);
35026 tmp2 = force_reg (SImode, tmp2);
35027 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35029 else
35031 emit_insn (gen_popcntbdi2 (tmp1, src));
35032 tmp2 = expand_mult (DImode, tmp1,
35033 GEN_INT ((HOST_WIDE_INT)
35034 0x01010101 << 32 | 0x01010101),
35035 NULL_RTX, 0);
35036 tmp2 = force_reg (DImode, tmp2);
35037 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
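/* Editor's note: a host-side sketch (not part of GCC) of the SImode
   fallback above.  popcntb leaves each byte holding the bit count of
   that byte; multiplying by 0x01010101 sums the four byte counts into
   the most significant byte, which the shift by 24 extracts.  */
static unsigned int
popcount_reference (unsigned int src)
{
  unsigned int per_byte = 0;
  for (int i = 0; i < 32; i += 8)       /* emulate popcntb  */
    per_byte |= (unsigned int) __builtin_popcount ((src >> i) & 0xff) << i;
  return (per_byte * 0x01010101u) >> 24;        /* sum bytes, extract  */
}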
35042 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35043 target, and SRC is the argument operand. */
35045 void
35046 rs6000_emit_parity (rtx dst, rtx src)
35048 machine_mode mode = GET_MODE (dst);
35049 rtx tmp;
35051 tmp = gen_reg_rtx (mode);
35053 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35054 if (TARGET_CMPB)
35056 if (mode == SImode)
35058 emit_insn (gen_popcntbsi2 (tmp, src));
35059 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35061 else
35063 emit_insn (gen_popcntbdi2 (tmp, src));
35064 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35066 return;
35069 if (mode == SImode)
35071 /* Is mult+shift >= shift+xor+shift+xor? */
35072 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35074 rtx tmp1, tmp2, tmp3, tmp4;
35076 tmp1 = gen_reg_rtx (SImode);
35077 emit_insn (gen_popcntbsi2 (tmp1, src));
35079 tmp2 = gen_reg_rtx (SImode);
35080 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35081 tmp3 = gen_reg_rtx (SImode);
35082 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35084 tmp4 = gen_reg_rtx (SImode);
35085 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35086 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35088 else
35089 rs6000_emit_popcount (tmp, src);
35090 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35092 else
35094 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35095 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35097 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35099 tmp1 = gen_reg_rtx (DImode);
35100 emit_insn (gen_popcntbdi2 (tmp1, src));
35102 tmp2 = gen_reg_rtx (DImode);
35103 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35104 tmp3 = gen_reg_rtx (DImode);
35105 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35107 tmp4 = gen_reg_rtx (DImode);
35108 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35109 tmp5 = gen_reg_rtx (DImode);
35110 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35112 tmp6 = gen_reg_rtx (DImode);
35113 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35114 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35116 else
35117 rs6000_emit_popcount (tmp, src);
35118 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
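/* Editor's note: a host-side sketch (not part of GCC) of the SImode
   shift+xor fallback above.  With per-byte bit counts in t, xoring
   the halves together folds the parity of every byte count into the
   low byte; the final and extracts it.  */
static unsigned int
parity_reference (unsigned int src)
{
  unsigned int t = 0;
  for (int i = 0; i < 32; i += 8)       /* emulate popcntb  */
    t |= (unsigned int) __builtin_popcount ((src >> i) & 0xff) << i;
  t ^= t >> 16;                         /* fold upper half down  */
  t ^= t >> 8;                          /* fold remaining byte  */
  return t & 1;                         /* parity of all 32 bits  */
}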
35122 /* Expand an Altivec constant permutation for little endian mode.
35123 There are two issues: First, the two input operands must be
35124 swapped so that together they form a double-wide array in LE
35125 order. Second, the vperm instruction has surprising behavior
35126 in LE mode: it interprets the elements of the source vectors
35127 in BE mode ("left to right") and interprets the elements of
35128 the destination vector in LE mode ("right to left"). To
35129 correct for this, we must subtract each element of the permute
35130 control vector from 31.
35132 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35133 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35134 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35135 serve as the permute control vector. Then, in BE mode,
35137 vperm 9,10,11,12
35139 places the desired result in vr9. However, in LE mode the
35140 vector contents will be
35142 vr10 = 00000003 00000002 00000001 00000000
35143 vr11 = 00000007 00000006 00000005 00000004
35145 The result of the vperm using the same permute control vector is
35147 vr9 = 05000000 07000000 01000000 03000000
35149 That is, the leftmost 4 bytes of vr10 are interpreted as the
35150 source for the rightmost 4 bytes of vr9, and so on.
35152 If we change the permute control vector to
35154 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35156 and issue
35158 vperm 9,11,10,12
35160 we get the desired
35162 vr9 = 00000006 00000004 00000002 00000000. */
35164 void
35165 altivec_expand_vec_perm_const_le (rtx operands[4])
35167 unsigned int i;
35168 rtx perm[16];
35169 rtx constv, unspec;
35170 rtx target = operands[0];
35171 rtx op0 = operands[1];
35172 rtx op1 = operands[2];
35173 rtx sel = operands[3];
35175 /* Unpack and adjust the constant selector. */
35176 for (i = 0; i < 16; ++i)
35178 rtx e = XVECEXP (sel, 0, i);
35179 unsigned int elt = 31 - (INTVAL (e) & 31);
35180 perm[i] = GEN_INT (elt);
35183 /* Expand to a permute, swapping the inputs and using the
35184 adjusted selector. */
35185 if (!REG_P (op0))
35186 op0 = force_reg (V16QImode, op0);
35187 if (!REG_P (op1))
35188 op1 = force_reg (V16QImode, op1);
35190 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35191 constv = force_reg (V16QImode, constv);
35192 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35193 UNSPEC_VPERM);
35194 if (!REG_P (target))
35196 rtx tmp = gen_reg_rtx (V16QImode);
35197 emit_move_insn (tmp, unspec);
35198 unspec = tmp;
35201 emit_move_insn (target, unspec);
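/* Editor's note: a sketch (not part of GCC) of the adjustment above.
   Together with swapping the two input operands, rewriting each index
   i as 31 - i turns a BE-numbered selector into its LE equivalent.  */
static void
adjust_selector_for_le (unsigned char sel[16])
{
  for (int i = 0; i < 16; i++)
    sel[i] = 31 - (sel[i] & 31);
}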
35204 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35205 permute control vector. But here it's not a constant, so we must
35206 generate a vector NAND or NOR to do the adjustment. */
35208 void
35209 altivec_expand_vec_perm_le (rtx operands[4])
35211 rtx notx, iorx, unspec;
35212 rtx target = operands[0];
35213 rtx op0 = operands[1];
35214 rtx op1 = operands[2];
35215 rtx sel = operands[3];
35216 rtx tmp = target;
35217 rtx norreg = gen_reg_rtx (V16QImode);
35218 machine_mode mode = GET_MODE (target);
35220 /* Get everything in regs so the pattern matches. */
35221 if (!REG_P (op0))
35222 op0 = force_reg (mode, op0);
35223 if (!REG_P (op1))
35224 op1 = force_reg (mode, op1);
35225 if (!REG_P (sel))
35226 sel = force_reg (V16QImode, sel);
35227 if (!REG_P (target))
35228 tmp = gen_reg_rtx (mode);
35230 if (TARGET_P9_VECTOR)
35232 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
35233 UNSPEC_VPERMR);
35235 else
35237 /* Invert the selector with a VNAND if available, else a VNOR.
35238 The VNAND is preferred for future fusion opportunities. */
35239 notx = gen_rtx_NOT (V16QImode, sel);
35240 iorx = (TARGET_P8_VECTOR
35241 ? gen_rtx_IOR (V16QImode, notx, notx)
35242 : gen_rtx_AND (V16QImode, notx, notx));
35243 emit_insn (gen_rtx_SET (norreg, iorx));
35245 /* Permute with operands reversed and adjusted selector. */
35246 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35247 UNSPEC_VPERM);
35250 /* Copy into target, possibly by way of a register. */
35251 if (!REG_P (target))
35253 emit_move_insn (tmp, unspec);
35254 unspec = tmp;
35257 emit_move_insn (target, unspec);
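/* Editor's note: the bitwise inversion suffices because vperm reads
   only the low five bits of each selector byte, and on those bits
   ~i equals 31 - i, the same adjustment the constant case applies.
   A minimal check (not part of GCC):  */
static int
vnot_matches_sub_from_31 (void)
{
  for (unsigned int i = 0; i < 32; i++)
    if ((~i & 31) != 31 - i)
      return 0;
  return 1;                             /* holds for every 5-bit index  */
}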
35260 /* Expand an Altivec constant permutation. Return true if we match
35261 an efficient implementation; false to fall back to VPERM. */
35263 bool
35264 altivec_expand_vec_perm_const (rtx operands[4])
35266 struct altivec_perm_insn {
35267 HOST_WIDE_INT mask;
35268 enum insn_code impl;
35269 unsigned char perm[16];
35271 static const struct altivec_perm_insn patterns[] = {
35272 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35273 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35274 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35275 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35276 { OPTION_MASK_ALTIVEC,
35277 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35278 : CODE_FOR_altivec_vmrglb_direct),
35279 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35280 { OPTION_MASK_ALTIVEC,
35281 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35282 : CODE_FOR_altivec_vmrglh_direct),
35283 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35284 { OPTION_MASK_ALTIVEC,
35285 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35286 : CODE_FOR_altivec_vmrglw_direct),
35287 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35288 { OPTION_MASK_ALTIVEC,
35289 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35290 : CODE_FOR_altivec_vmrghb_direct),
35291 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35292 { OPTION_MASK_ALTIVEC,
35293 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35294 : CODE_FOR_altivec_vmrghh_direct),
35295 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35296 { OPTION_MASK_ALTIVEC,
35297 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35298 : CODE_FOR_altivec_vmrghw_direct),
35299 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35300 { OPTION_MASK_P8_VECTOR,
35301 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35302 : CODE_FOR_p8_vmrgow_v4sf_direct),
35303 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35304 { OPTION_MASK_P8_VECTOR,
35305 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35306 : CODE_FOR_p8_vmrgew_v4sf_direct),
35307 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35310 unsigned int i, j, elt, which;
35311 unsigned char perm[16];
35312 rtx target, op0, op1, sel, x;
35313 bool one_vec;
35315 target = operands[0];
35316 op0 = operands[1];
35317 op1 = operands[2];
35318 sel = operands[3];
35320 /* Unpack the constant selector. */
35321 for (i = which = 0; i < 16; ++i)
35323 rtx e = XVECEXP (sel, 0, i);
35324 elt = INTVAL (e) & 31;
35325 which |= (elt < 16 ? 1 : 2);
35326 perm[i] = elt;
35329 /* Simplify the constant selector based on operands. */
35330 switch (which)
35332 default:
35333 gcc_unreachable ();
35335 case 3:
35336 one_vec = false;
35337 if (!rtx_equal_p (op0, op1))
35338 break;
35339 /* FALLTHRU */
35341 case 2:
35342 for (i = 0; i < 16; ++i)
35343 perm[i] &= 15;
35344 op0 = op1;
35345 one_vec = true;
35346 break;
35348 case 1:
35349 op1 = op0;
35350 one_vec = true;
35351 break;
35354 /* Look for splat patterns. */
35355 if (one_vec)
35357 elt = perm[0];
35359 for (i = 0; i < 16; ++i)
35360 if (perm[i] != elt)
35361 break;
35362 if (i == 16)
35364 if (!BYTES_BIG_ENDIAN)
35365 elt = 15 - elt;
35366 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35367 return true;
35370 if (elt % 2 == 0)
35372 for (i = 0; i < 16; i += 2)
35373 if (perm[i] != elt || perm[i + 1] != elt + 1)
35374 break;
35375 if (i == 16)
35377 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35378 x = gen_reg_rtx (V8HImode);
35379 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35380 GEN_INT (field)));
35381 emit_move_insn (target, gen_lowpart (V16QImode, x));
35382 return true;
35386 if (elt % 4 == 0)
35388 for (i = 0; i < 16; i += 4)
35389 if (perm[i] != elt
35390 || perm[i + 1] != elt + 1
35391 || perm[i + 2] != elt + 2
35392 || perm[i + 3] != elt + 3)
35393 break;
35394 if (i == 16)
35396 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35397 x = gen_reg_rtx (V4SImode);
35398 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35399 GEN_INT (field)));
35400 emit_move_insn (target, gen_lowpart (V16QImode, x));
35401 return true;
35406 /* Look for merge and pack patterns. */
35407 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35409 bool swapped;
35411 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35412 continue;
35414 elt = patterns[j].perm[0];
35415 if (perm[0] == elt)
35416 swapped = false;
35417 else if (perm[0] == elt + 16)
35418 swapped = true;
35419 else
35420 continue;
35421 for (i = 1; i < 16; ++i)
35423 elt = patterns[j].perm[i];
35424 if (swapped)
35425 elt = (elt >= 16 ? elt - 16 : elt + 16);
35426 else if (one_vec && elt >= 16)
35427 elt -= 16;
35428 if (perm[i] != elt)
35429 break;
35431 if (i == 16)
35433 enum insn_code icode = patterns[j].impl;
35434 machine_mode omode = insn_data[icode].operand[0].mode;
35435 machine_mode imode = insn_data[icode].operand[1].mode;
35437 /* For little-endian, don't use vpkuwum and vpkuhum if the
35438 underlying vector type is not V4SI and V8HI, respectively.
35439 For example, using vpkuwum with a V8HI picks up the even
35440 halfwords (BE numbering) when the even halfwords (LE
35441 numbering) are what we need. */
35442 if (!BYTES_BIG_ENDIAN
35443 && icode == CODE_FOR_altivec_vpkuwum_direct
35444 && ((GET_CODE (op0) == REG
35445 && GET_MODE (op0) != V4SImode)
35446 || (GET_CODE (op0) == SUBREG
35447 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35448 continue;
35449 if (!BYTES_BIG_ENDIAN
35450 && icode == CODE_FOR_altivec_vpkuhum_direct
35451 && ((GET_CODE (op0) == REG
35452 && GET_MODE (op0) != V8HImode)
35453 || (GET_CODE (op0) == SUBREG
35454 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35455 continue;
35457 /* For little-endian, the two input operands must be swapped
35458 (or swapped back) to ensure proper right-to-left numbering
35459 from 0 to 2N-1. */
35460 if (swapped ^ !BYTES_BIG_ENDIAN)
35461 std::swap (op0, op1);
35462 if (imode != V16QImode)
35464 op0 = gen_lowpart (imode, op0);
35465 op1 = gen_lowpart (imode, op1);
35467 if (omode == V16QImode)
35468 x = target;
35469 else
35470 x = gen_reg_rtx (omode);
35471 emit_insn (GEN_FCN (icode) (x, op0, op1));
35472 if (omode != V16QImode)
35473 emit_move_insn (target, gen_lowpart (V16QImode, x));
35474 return true;
35478 if (!BYTES_BIG_ENDIAN)
35480 altivec_expand_vec_perm_const_le (operands);
35481 return true;
35484 return false;
35487 /* Expand a Paired Single or VSX Permute Doubleword constant permutation.
35488 Return true if we match an efficient implementation. */
35490 static bool
35491 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35492 unsigned char perm0, unsigned char perm1)
35494 rtx x;
35496 /* If both selectors come from the same operand, fold to single op. */
35497 if ((perm0 & 2) == (perm1 & 2))
35499 if (perm0 & 2)
35500 op0 = op1;
35501 else
35502 op1 = op0;
35504 /* If both operands are equal, fold to simpler permutation. */
35505 if (rtx_equal_p (op0, op1))
35507 perm0 = perm0 & 1;
35508 perm1 = (perm1 & 1) + 2;
35510 /* If the first selector comes from the second operand, swap. */
35511 else if (perm0 & 2)
35513 if (perm1 & 2)
35514 return false;
35515 perm0 -= 2;
35516 perm1 += 2;
35517 std::swap (op0, op1);
35519 /* If the second selector does not come from the second operand, fail. */
35520 else if ((perm1 & 2) == 0)
35521 return false;
35523 /* Success! */
35524 if (target != NULL)
35526 machine_mode vmode, dmode;
35527 rtvec v;
35529 vmode = GET_MODE (target);
35530 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35531 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35532 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35533 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35534 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35535 emit_insn (gen_rtx_SET (target, x));
35537 return true;
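/* Editor's note: in the folding above each selector is two bits --
   bit 1 picks the operand, bit 0 the doubleword within it.  For
   example, { 0, 3 } selects op0[0] and op1[1] directly, while
   { 2, 1 } is handled by swapping op0/op1 and rewriting the
   selectors to { 0, 3 }.  */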
35540 bool
35541 rs6000_expand_vec_perm_const (rtx operands[4])
35543 rtx target, op0, op1, sel;
35544 unsigned char perm0, perm1;
35546 target = operands[0];
35547 op0 = operands[1];
35548 op1 = operands[2];
35549 sel = operands[3];
35551 /* Unpack the constant selector. */
35552 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
35553 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
35555 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
35558 /* Test whether a constant permutation is supported. */
35560 static bool
35561 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode,
35562 const unsigned char *sel)
35564 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35565 if (TARGET_ALTIVEC)
35566 return true;
35568 /* Check for ps_merge* or evmerge* insns. */
35569 if (TARGET_PAIRED_FLOAT && vmode == V2SFmode)
35571 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35572 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35573 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
35576 return false;
35579 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
35581 static void
35582 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35583 machine_mode vmode, unsigned nelt, rtx perm[])
35585 machine_mode imode;
35586 rtx x;
35588 imode = vmode;
35589 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
35590 imode = mode_for_int_vector (vmode).require ();
35592 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
35593 x = expand_vec_perm (vmode, op0, op1, x, target);
35594 if (x != target)
35595 emit_move_insn (target, x);
35598 /* Expand an extract even operation. */
35600 void
35601 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35603 machine_mode vmode = GET_MODE (target);
35604 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35605 rtx perm[16];
35607 for (i = 0; i < nelt; i++)
35608 perm[i] = GEN_INT (i * 2);
35610 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
35613 /* Expand a vector interleave operation. */
35615 void
35616 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35618 machine_mode vmode = GET_MODE (target);
35619 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35620 rtx perm[16];
35622 high = (highp ? 0 : nelt / 2);
35623 for (i = 0; i < nelt / 2; i++)
35625 perm[i * 2] = GEN_INT (i + high);
35626 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
35629 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
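/* Editor's note: for V4SI with highp true, the loop above builds the
   selector { 0, 4, 1, 5 }, pairing elements 0..1 of op0 with elements
   0..1 of op1; with highp false it builds { 2, 6, 3, 7 }.  */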
35632 /* Scale a V2DF vector SRC by two to the SCALE and place in TGT. */
35633 void
35634 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35636 HOST_WIDE_INT hwi_scale (scale);
35637 REAL_VALUE_TYPE r_pow;
35638 rtvec v = rtvec_alloc (2);
35639 rtx elt;
35640 rtx scale_vec = gen_reg_rtx (V2DFmode);
35641 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35642 elt = const_double_from_real_value (r_pow, DFmode);
35643 RTVEC_ELT (v, 0) = elt;
35644 RTVEC_ELT (v, 1) = elt;
35645 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35646 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
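/* Editor's note: a host-side sketch (not part of GCC) of what the
   sequence above computes per lane: a multiply by the splatted
   constant 2**scale, i.e. ldexp up to overflow/rounding corners.  */
static double
scale_v2df_reference (double src, int scale)
{
  return __builtin_ldexp (src, scale);  /* src * 2**scale  */
}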
35649 /* Return an RTX representing where to find the function value of a
35650 function returning MODE. */
35651 static rtx
35652 rs6000_complex_function_value (machine_mode mode)
35654 unsigned int regno;
35655 rtx r1, r2;
35656 machine_mode inner = GET_MODE_INNER (mode);
35657 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35659 if (TARGET_FLOAT128_TYPE
35660 && (mode == KCmode
35661 || (mode == TCmode && TARGET_IEEEQUAD)))
35662 regno = ALTIVEC_ARG_RETURN;
35664 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35665 regno = FP_ARG_RETURN;
35667 else
35669 regno = GP_ARG_RETURN;
35671 /* 32-bit is OK since it'll go in r3/r4. */
35672 if (TARGET_32BIT && inner_bytes >= 4)
35673 return gen_rtx_REG (mode, regno);
35676 if (inner_bytes >= 8)
35677 return gen_rtx_REG (mode, regno);
35679 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35680 const0_rtx);
35681 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35682 GEN_INT (inner_bytes));
35683 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35686 /* Return an rtx describing a return value of MODE as a PARALLEL
35687 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35688 stride REG_STRIDE. */
35690 static rtx
35691 rs6000_parallel_return (machine_mode mode,
35692 int n_elts, machine_mode elt_mode,
35693 unsigned int regno, unsigned int reg_stride)
35695 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35697 int i;
35698 for (i = 0; i < n_elts; i++)
35700 rtx r = gen_rtx_REG (elt_mode, regno);
35701 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35702 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35703 regno += reg_stride;
35706 return par;
35709 /* Target hook for TARGET_FUNCTION_VALUE.
35711 An integer value is in r3 and a floating-point value is in fp1,
35712 unless -msoft-float. */
35714 static rtx
35715 rs6000_function_value (const_tree valtype,
35716 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35717 bool outgoing ATTRIBUTE_UNUSED)
35719 machine_mode mode;
35720 unsigned int regno;
35721 machine_mode elt_mode;
35722 int n_elts;
35724 /* Special handling for structs in darwin64. */
35725 if (TARGET_MACHO
35726 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35728 CUMULATIVE_ARGS valcum;
35729 rtx valret;
35731 valcum.words = 0;
35732 valcum.fregno = FP_ARG_MIN_REG;
35733 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35734 /* Do a trial code generation as if this were going to be passed as
35735 an argument; if any part goes in memory, we return NULL. */
35736 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35737 if (valret)
35738 return valret;
35739 /* Otherwise fall through to standard ABI rules. */
35742 mode = TYPE_MODE (valtype);
35744 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35745 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35747 int first_reg, n_regs;
35749 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35751 /* _Decimal128 must use even/odd register pairs. */
35752 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35753 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35755 else
35757 first_reg = ALTIVEC_ARG_RETURN;
35758 n_regs = 1;
35761 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35764 /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI. */
35765 if (TARGET_32BIT && TARGET_POWERPC64)
35766 switch (mode)
35768 default:
35769 break;
35770 case E_DImode:
35771 case E_SCmode:
35772 case E_DCmode:
35773 case E_TCmode:
35774 int count = GET_MODE_SIZE (mode) / 4;
35775 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35778 if ((INTEGRAL_TYPE_P (valtype)
35779 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35780 || POINTER_TYPE_P (valtype))
35781 mode = TARGET_32BIT ? SImode : DImode;
35783 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35784 /* _Decimal128 must use an even/odd register pair. */
35785 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35786 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35787 && !FLOAT128_VECTOR_P (mode)
35788 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
35789 regno = FP_ARG_RETURN;
35790 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35791 && targetm.calls.split_complex_arg)
35792 return rs6000_complex_function_value (mode);
35793 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35794 return register is used in both cases, and we won't see V2DImode/V2DFmode
35795 for pure altivec, combine the two cases. */
35796 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35797 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35798 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35799 regno = ALTIVEC_ARG_RETURN;
35800 else
35801 regno = GP_ARG_RETURN;
35803 return gen_rtx_REG (mode, regno);
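/* Editor's note: for example, under the ELFv2 ABI a homogeneous
   aggregate such as struct { double a, b; } is returned by the code
   above as a PARALLEL placing the two doubles in f1 and f2 at byte
   offsets 0 and 8 (FP_ARG_RETURN is f1, with a stride of one
   register per 8-byte element).  */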
35806 /* Define how to find the value returned by a library function
35807 assuming the value has mode MODE. */
35808 static rtx
35809 rs6000_libcall_value (machine_mode mode)
35811 unsigned int regno;
35813 /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI. */
35814 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35815 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35817 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35818 /* _Decimal128 must use an even/odd register pair. */
35819 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35820 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
35821 && TARGET_HARD_FLOAT
35822 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
35823 regno = FP_ARG_RETURN;
35824 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35825 return register is used in both cases, and we won't see V2DImode/V2DFmode
35826 for pure altivec, combine the two cases. */
35827 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35828 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35829 regno = ALTIVEC_ARG_RETURN;
35830 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35831 return rs6000_complex_function_value (mode);
35832 else
35833 regno = GP_ARG_RETURN;
35835 return gen_rtx_REG (mode, regno);
35838 /* Compute register pressure classes. We implement the target hook to avoid
35839 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
35840 lead to incorrect estimates of the number of available registers and therefore
35841 increased register pressure/spill. */
35842 static int
35843 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35845 int n;
35847 n = 0;
35848 pressure_classes[n++] = GENERAL_REGS;
35849 if (TARGET_VSX)
35850 pressure_classes[n++] = VSX_REGS;
35851 else
35853 if (TARGET_ALTIVEC)
35854 pressure_classes[n++] = ALTIVEC_REGS;
35855 if (TARGET_HARD_FLOAT)
35856 pressure_classes[n++] = FLOAT_REGS;
35858 pressure_classes[n++] = CR_REGS;
35859 pressure_classes[n++] = SPECIAL_REGS;
35861 return n;
35864 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35865 Frame pointer elimination is automatically handled.
35867 For the RS/6000, if frame pointer elimination is being done, we would like
35868 to convert ap into fp, not sp.
35870 We need r30 if -mminimal-toc was specified, and there are constant pool
35871 references. */
35873 static bool
35874 rs6000_can_eliminate (const int from, const int to)
35876 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35877 ? ! frame_pointer_needed
35878 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35879 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
35880 || constant_pool_empty_p ()
35881 : true);
35884 /* Define the offset between two registers, FROM to be eliminated and its
35885 replacement TO, at the start of a routine. */
35886 HOST_WIDE_INT
35887 rs6000_initial_elimination_offset (int from, int to)
35889 rs6000_stack_t *info = rs6000_stack_info ();
35890 HOST_WIDE_INT offset;
35892 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35893 offset = info->push_p ? 0 : -info->total_size;
35894 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35896 offset = info->push_p ? 0 : -info->total_size;
35897 if (FRAME_GROWS_DOWNWARD)
35898 offset += info->fixed_size + info->vars_size + info->parm_size;
35900 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35901 offset = FRAME_GROWS_DOWNWARD
35902 ? info->fixed_size + info->vars_size + info->parm_size
35903 : 0;
35904 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35905 offset = info->total_size;
35906 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35907 offset = info->push_p ? info->total_size : 0;
35908 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35909 offset = 0;
35910 else
35911 gcc_unreachable ();
35913 return offset;
35916 /* Fill in sizes of registers used by unwinder. */
35918 static void
35919 rs6000_init_dwarf_reg_sizes_extra (tree address)
35921 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35923 int i;
35924 machine_mode mode = TYPE_MODE (char_type_node);
35925 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35926 rtx mem = gen_rtx_MEM (BLKmode, addr);
35927 rtx value = gen_int_mode (16, mode);
35929 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35930 The unwinder still needs to know the size of Altivec registers. */
35932 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35934 int column = DWARF_REG_TO_UNWIND_COLUMN
35935 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35936 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35938 emit_move_insn (adjust_address (mem, mode, offset), value);
35943 /* Map internal gcc register numbers to debug format register numbers.
35944 FORMAT specifies the type of debug register number to use:
35945 0 -- debug information, except for frame-related sections
35946 1 -- DWARF .debug_frame section
35947 2 -- DWARF .eh_frame section */
35949 unsigned int
35950 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
35952 /* Except for the above, we use the internal number for non-DWARF
35953 debug information, and also for .eh_frame. */
35954 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
35955 return regno;
35957 /* On some platforms, we use the standard DWARF register
35958 numbering for .debug_info and .debug_frame. */
35959 #ifdef RS6000_USE_DWARF_NUMBERING
35960 if (regno <= 63)
35961 return regno;
35962 if (regno == LR_REGNO)
35963 return 108;
35964 if (regno == CTR_REGNO)
35965 return 109;
35966 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
35967 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
35968 The actual code emitted saves the whole of CR, so we map CR2_REGNO
35969 to the DWARF reg for CR. */
35970 if (format == 1 && regno == CR2_REGNO)
35971 return 64;
35972 if (CR_REGNO_P (regno))
35973 return regno - CR0_REGNO + 86;
35974 if (regno == CA_REGNO)
35975 return 101; /* XER */
35976 if (ALTIVEC_REGNO_P (regno))
35977 return regno - FIRST_ALTIVEC_REGNO + 1124;
35978 if (regno == VRSAVE_REGNO)
35979 return 356;
35980 if (regno == VSCR_REGNO)
35981 return 67;
35982 #endif
35983 return regno;
35986 /* Target hook for eh_return_filter_mode. */
35987 static scalar_int_mode
35988 rs6000_eh_return_filter_mode (void)
35990 return TARGET_32BIT ? SImode : word_mode;
35993 /* Target hook for scalar_mode_supported_p. */
35994 static bool
35995 rs6000_scalar_mode_supported_p (scalar_mode mode)
35997 /* -m32 does not support TImode. This is the default, from
35998 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
35999 same ABI as for -m32. But default_scalar_mode_supported_p allows
36000 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36001 for -mpowerpc64. */
36002 if (TARGET_32BIT && mode == TImode)
36003 return false;
36005 if (DECIMAL_FLOAT_MODE_P (mode))
36006 return default_decimal_float_supported_p ();
36007 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36008 return true;
36009 else
36010 return default_scalar_mode_supported_p (mode);
36013 /* Target hook for vector_mode_supported_p. */
36014 static bool
36015 rs6000_vector_mode_supported_p (machine_mode mode)
36018 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
36019 return true;
36021 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36022 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36023 double-double. */
36024 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36025 return true;
36027 else
36028 return false;
36031 /* Target hook for floatn_mode. */
36032 static opt_scalar_float_mode
36033 rs6000_floatn_mode (int n, bool extended)
36035 if (extended)
36037 switch (n)
36039 case 32:
36040 return DFmode;
36042 case 64:
36043 if (TARGET_FLOAT128_TYPE)
36044 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36045 else
36046 return opt_scalar_float_mode ();
36048 case 128:
36049 return opt_scalar_float_mode ();
36051 default:
36052 /* Those are the only valid _FloatNx types. */
36053 gcc_unreachable ();
36056 else
36058 switch (n)
36060 case 32:
36061 return SFmode;
36063 case 64:
36064 return DFmode;
36066 case 128:
36067 if (TARGET_FLOAT128_TYPE)
36068 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36069 else
36070 return opt_scalar_float_mode ();
36072 default:
36073 return opt_scalar_float_mode ();
36079 /* Target hook for c_mode_for_suffix. */
36080 static machine_mode
36081 rs6000_c_mode_for_suffix (char suffix)
36083 if (TARGET_FLOAT128_TYPE)
36085 if (suffix == 'q' || suffix == 'Q')
36086 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36088 /* At the moment, we are not defining a suffix for IBM extended double.
36089 If/when the default for -mabi=ieeelongdouble is changed, and we want
36090 to support __ibm128 constants in legacy library code, we may need to
36091 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36092 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36093 __float80 constants. */
36096 return VOIDmode;
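/* Editor's note: an illustrative example (not part of GCC sources).
   With -mfloat128, the 'q'/'Q' suffix mapped above gives a constant
   the __float128 (KFmode) type, or TFmode when long double is IEEE
   128-bit:

       __float128 x = 1.5q;

   No suffix currently yields __ibm128.  */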
36099 /* Target hook for invalid_arg_for_unprototyped_fn. */
36100 static const char *
36101 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36103 return (!rs6000_darwin64_abi
36104 && typelist == 0
36105 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36106 && (funcdecl == NULL_TREE
36107 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36108 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36109 ? N_("AltiVec argument passed to unprototyped function")
36110 : NULL;
36113 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36114 setup by using __stack_chk_fail_local hidden function instead of
36115 calling __stack_chk_fail directly. Otherwise it is better to call
36116 __stack_chk_fail directly. */
36118 static tree ATTRIBUTE_UNUSED
36119 rs6000_stack_protect_fail (void)
36121 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36122 ? default_hidden_stack_protect_fail ()
36123 : default_external_stack_protect_fail ();
36126 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36128 #if TARGET_ELF
36129 static unsigned HOST_WIDE_INT
36130 rs6000_asan_shadow_offset (void)
36132 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36134 #endif
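/* Editor's note: a sketch (not part of GCC) of how libsanitizer uses
   the offset returned above.  With the default shadow scale of 3, an
   application address maps to its shadow byte as (addr >> 3) + offset,
   so the shadow region sits at 1<<41 (64-bit) or 1<<29 (32-bit).  */
static unsigned long long
asan_shadow_address (unsigned long long addr, int is_64bit)
{
  unsigned long long offset = 1ULL << (is_64bit ? 41 : 29);
  return (addr >> 3) + offset;          /* address of the shadow byte  */
}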
36136 /* Mask options that we want to support inside of attribute((target)) and
36137 #pragma GCC target operations. Note, we do not include things like
36138 64/32-bit, endianness, hard/soft floating point, etc. that would have
36139 different calling sequences. */
36141 struct rs6000_opt_mask {
36142 const char *name; /* option name */
36143 HOST_WIDE_INT mask; /* mask to set */
36144 bool invert; /* invert sense of mask */
36145 bool valid_target; /* option is a target option */
36148 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36150 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36151 { "cmpb", OPTION_MASK_CMPB, false, true },
36152 { "crypto", OPTION_MASK_CRYPTO, false, true },
36153 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36154 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36155 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36156 false, true },
36157 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36158 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36159 { "fprnd", OPTION_MASK_FPRND, false, true },
36160 { "hard-dfp", OPTION_MASK_DFP, false, true },
36161 { "htm", OPTION_MASK_HTM, false, true },
36162 { "isel", OPTION_MASK_ISEL, false, true },
36163 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36164 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36165 { "modulo", OPTION_MASK_MODULO, false, true },
36166 { "mulhw", OPTION_MASK_MULHW, false, true },
36167 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36168 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36169 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36170 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36171 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36172 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36173 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
36174 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36175 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36176 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36177 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36178 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36179 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36180 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36181 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36182 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36183 { "string", OPTION_MASK_STRING, false, true },
36184 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
36185 { "update", OPTION_MASK_NO_UPDATE, true , true },
36186 { "vsx", OPTION_MASK_VSX, false, true },
36187 #ifdef OPTION_MASK_64BIT
36188 #if TARGET_AIX_OS
36189 { "aix64", OPTION_MASK_64BIT, false, false },
36190 { "aix32", OPTION_MASK_64BIT, true, false },
36191 #else
36192 { "64", OPTION_MASK_64BIT, false, false },
36193 { "32", OPTION_MASK_64BIT, true, false },
36194 #endif
36195 #endif
36196 #ifdef OPTION_MASK_EABI
36197 { "eabi", OPTION_MASK_EABI, false, false },
36198 #endif
36199 #ifdef OPTION_MASK_LITTLE_ENDIAN
36200 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36201 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36202 #endif
36203 #ifdef OPTION_MASK_RELOCATABLE
36204 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36205 #endif
36206 #ifdef OPTION_MASK_STRICT_ALIGN
36207 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36208 #endif
36209 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36210 { "string", OPTION_MASK_STRING, false, false },
36213 /* Builtin mask mapping for printing the flags. */
36214 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36216 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36217 { "vsx", RS6000_BTM_VSX, false, false },
36218 { "paired", RS6000_BTM_PAIRED, false, false },
36219 { "fre", RS6000_BTM_FRE, false, false },
36220 { "fres", RS6000_BTM_FRES, false, false },
36221 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36222 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36223 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36224 { "cell", RS6000_BTM_CELL, false, false },
36225 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36226 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36227 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36228 { "crypto", RS6000_BTM_CRYPTO, false, false },
36229 { "htm", RS6000_BTM_HTM, false, false },
36230 { "hard-dfp", RS6000_BTM_DFP, false, false },
36231 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36232 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36233 { "float128", RS6000_BTM_FLOAT128, false, false },
36236 /* Option variables that we want to support inside attribute((target)) and
36237 #pragma GCC target operations. */
36239 struct rs6000_opt_var {
36240 const char *name; /* option name */
36241 size_t global_offset; /* offset of the option in global_options. */
36242 size_t target_offset; /* offset of the option in target options. */
36245 static struct rs6000_opt_var const rs6000_opt_vars[] =
36247 { "friz",
36248 offsetof (struct gcc_options, x_TARGET_FRIZ),
36249 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36250 { "avoid-indexed-addresses",
36251 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36252 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36253 { "paired",
36254 offsetof (struct gcc_options, x_rs6000_paired_float),
36255 offsetof (struct cl_target_option, x_rs6000_paired_float), },
36256 { "longcall",
36257 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36258 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36259 { "optimize-swaps",
36260 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36261 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36262 { "allow-movmisalign",
36263 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36264 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36265 { "sched-groups",
36266 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36267 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36268 { "always-hint",
36269 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36270 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36271 { "align-branch-targets",
36272 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36273 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36274 { "tls-markers",
36275 offsetof (struct gcc_options, x_tls_markers),
36276 offsetof (struct cl_target_option, x_tls_markers), },
36277 { "sched-prolog",
36278 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36279 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36280 { "sched-epilog",
36281 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36282 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36285 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36286 parsing. Return true if there were no errors. */
36288 static bool
36289 rs6000_inner_target_options (tree args, bool attr_p)
36291 bool ret = true;
36293 if (args == NULL_TREE)
36296 else if (TREE_CODE (args) == STRING_CST)
36298 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36299 char *q;
36301 while ((q = strtok (p, ",")) != NULL)
36303 bool error_p = false;
36304 bool not_valid_p = false;
36305 const char *cpu_opt = NULL;
36307 p = NULL;
36308 if (strncmp (q, "cpu=", 4) == 0)
36310 int cpu_index = rs6000_cpu_name_lookup (q+4);
36311 if (cpu_index >= 0)
36312 rs6000_cpu_index = cpu_index;
36313 else
36315 error_p = true;
36316 cpu_opt = q+4;
36319 else if (strncmp (q, "tune=", 5) == 0)
36321 int tune_index = rs6000_cpu_name_lookup (q+5);
36322 if (tune_index >= 0)
36323 rs6000_tune_index = tune_index;
36324 else
36326 error_p = true;
36327 cpu_opt = q+5;
36330 else
36332 size_t i;
36333 bool invert = false;
36334 char *r = q;
36336 error_p = true;
36337 if (strncmp (r, "no-", 3) == 0)
36339 invert = true;
36340 r += 3;
36343 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36344 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36346 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36348 if (!rs6000_opt_masks[i].valid_target)
36349 not_valid_p = true;
36350 else
36352 error_p = false;
36353 rs6000_isa_flags_explicit |= mask;
36355 /* VSX needs altivec, so -mvsx automagically sets
36356 altivec and disables -mavoid-indexed-addresses. */
36357 if (!invert)
36359 if (mask == OPTION_MASK_VSX)
36361 mask |= OPTION_MASK_ALTIVEC;
36362 TARGET_AVOID_XFORM = 0;
36366 if (rs6000_opt_masks[i].invert)
36367 invert = !invert;
36369 if (invert)
36370 rs6000_isa_flags &= ~mask;
36371 else
36372 rs6000_isa_flags |= mask;
36374 break;
36377 if (error_p && !not_valid_p)
36379 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36380 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36382 size_t j = rs6000_opt_vars[i].global_offset;
36383 *((int *) ((char *)&global_options + j)) = !invert;
36384 error_p = false;
36385 not_valid_p = false;
36386 break;
36391 if (error_p)
36393 const char *eprefix, *esuffix;
36395 ret = false;
36396 if (attr_p)
36398 eprefix = "__attribute__((__target__(";
36399 esuffix = ")))";
36401 else
36403 eprefix = "#pragma GCC target ";
36404 esuffix = "";
36407 if (cpu_opt)
36408 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36409 q, esuffix);
36410 else if (not_valid_p)
36411 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36412 else
36413 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36418 else if (TREE_CODE (args) == TREE_LIST)
36422 tree value = TREE_VALUE (args);
36423 if (value)
36425 bool ret2 = rs6000_inner_target_options (value, attr_p);
36426 if (!ret2)
36427 ret = false;
36429 args = TREE_CHAIN (args);
36431 while (args != NULL_TREE);
36434 else
36436 error ("attribute %<target%> argument not a string");
36437 return false;
36440 return ret;
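/* Editor's note: an illustrative example (not part of GCC) of the
   strings this parser accepts, as they appear in user code:

       __attribute__ ((__target__ ("cpu=power9,vsx,no-update")))
       void f (void);

       #pragma GCC target ("tune=power8,popcntd")

   Each comma-separated token is looked up in rs6000_opt_masks and
   rs6000_opt_vars; a "no-" prefix inverts an option, and the cpu=
   and tune= forms select a processor by name.  */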
36443 /* Print out the target options as a list for -mdebug=target. */
36445 static void
36446 rs6000_debug_target_options (tree args, const char *prefix)
36448 if (args == NULL_TREE)
36449 fprintf (stderr, "%s<NULL>", prefix);
36451 else if (TREE_CODE (args) == STRING_CST)
36453 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36454 char *q;
36456 while ((q = strtok (p, ",")) != NULL)
36458 p = NULL;
36459 fprintf (stderr, "%s\"%s\"", prefix, q);
36460 prefix = ", ";
36464 else if (TREE_CODE (args) == TREE_LIST)
36468 tree value = TREE_VALUE (args);
36469 if (value)
36471 rs6000_debug_target_options (value, prefix);
36472 prefix = ", ";
36474 args = TREE_CHAIN (args);
36476 while (args != NULL_TREE);
36479 else
36480 gcc_unreachable ();
36482 return;
36486 /* Hook to validate attribute((target("..."))). */
36488 static bool
36489 rs6000_valid_attribute_p (tree fndecl,
36490 tree ARG_UNUSED (name),
36491 tree args,
36492 int flags)
36494 struct cl_target_option cur_target;
36495 bool ret;
36496 tree old_optimize = build_optimization_node (&global_options);
36497 tree new_target, new_optimize;
36498 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36500 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36502 if (TARGET_DEBUG_TARGET)
36504 tree tname = DECL_NAME (fndecl);
36505 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36506 if (tname)
36507 fprintf (stderr, "function: %.*s\n",
36508 (int) IDENTIFIER_LENGTH (tname),
36509 IDENTIFIER_POINTER (tname));
36510 else
36511 fprintf (stderr, "function: unknown\n");
36513 fprintf (stderr, "args:");
36514 rs6000_debug_target_options (args, " ");
36515 fprintf (stderr, "\n");
36517 if (flags)
36518 fprintf (stderr, "flags: 0x%x\n", flags);
36520 fprintf (stderr, "--------------------\n");
36523 /* attribute((target("default"))) does nothing, beyond
36524 affecting multi-versioning. */
36525 if (TREE_VALUE (args)
36526 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36527 && TREE_CHAIN (args) == NULL_TREE
36528 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36529 return true;
36531 old_optimize = build_optimization_node (&global_options);
36532 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36534 /* If the function changed the optimization levels as well as setting target
36535 options, start with the optimizations specified. */
36536 if (func_optimize && func_optimize != old_optimize)
36537 cl_optimization_restore (&global_options,
36538 TREE_OPTIMIZATION (func_optimize));
36540 /* The target attributes may also change some optimization flags, so update
36541 the optimization options if necessary. */
36542 cl_target_option_save (&cur_target, &global_options);
36543 rs6000_cpu_index = rs6000_tune_index = -1;
36544 ret = rs6000_inner_target_options (args, true);
36546 /* Set up any additional state. */
36547 if (ret)
36549 ret = rs6000_option_override_internal (false);
36550 new_target = build_target_option_node (&global_options);
36552 else
36553 new_target = NULL;
36555 new_optimize = build_optimization_node (&global_options);
36557 if (!new_target)
36558 ret = false;
36560 else if (fndecl)
36562 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36564 if (old_optimize != new_optimize)
36565 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36568 cl_target_option_restore (&global_options, &cur_target);
36570 if (old_optimize != new_optimize)
36571 cl_optimization_restore (&global_options,
36572 TREE_OPTIMIZATION (old_optimize));
36574 return ret;
36578 /* Hook to validate the current #pragma GCC target and set the state, and
36579 update the macros based on what was changed. If ARGS is NULL, then
36580 POP_TARGET is used to reset the options. */
36582 bool
36583 rs6000_pragma_target_parse (tree args, tree pop_target)
36585 tree prev_tree = build_target_option_node (&global_options);
36586 tree cur_tree;
36587 struct cl_target_option *prev_opt, *cur_opt;
36588 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36589 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36591 if (TARGET_DEBUG_TARGET)
36593 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36594 fprintf (stderr, "args:");
36595 rs6000_debug_target_options (args, " ");
36596 fprintf (stderr, "\n");
36598 if (pop_target)
36600 fprintf (stderr, "pop_target:\n");
36601 debug_tree (pop_target);
36603 else
36604 fprintf (stderr, "pop_target: <NULL>\n");
36606 fprintf (stderr, "--------------------\n");
36609 if (! args)
36611 cur_tree = ((pop_target)
36612 ? pop_target
36613 : target_option_default_node);
36614 cl_target_option_restore (&global_options,
36615 TREE_TARGET_OPTION (cur_tree));
36617 else
36619 rs6000_cpu_index = rs6000_tune_index = -1;
36620 if (!rs6000_inner_target_options (args, false)
36621 || !rs6000_option_override_internal (false)
36622 || (cur_tree = build_target_option_node (&global_options))
36623 == NULL_TREE)
36625 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36626 fprintf (stderr, "invalid pragma\n");
36628 return false;
36632 target_option_current_node = cur_tree;
36634 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36635 change the macros that are defined. */
36636 if (rs6000_target_modify_macros_ptr)
36638 prev_opt = TREE_TARGET_OPTION (prev_tree);
36639 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36640 prev_flags = prev_opt->x_rs6000_isa_flags;
36642 cur_opt = TREE_TARGET_OPTION (cur_tree);
36643 cur_flags = cur_opt->x_rs6000_isa_flags;
36644 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36646 diff_bumask = (prev_bumask ^ cur_bumask);
36647 diff_flags = (prev_flags ^ cur_flags);
36649 if ((diff_flags != 0) || (diff_bumask != 0))
36651 /* Delete old macros. */
36652 rs6000_target_modify_macros_ptr (false,
36653 prev_flags & diff_flags,
36654 prev_bumask & diff_bumask);
36656 /* Define new macros. */
36657 rs6000_target_modify_macros_ptr (true,
36658 cur_flags & diff_flags,
36659 cur_bumask & diff_bumask);
36663 return true;
36667 /* Remember the last target of rs6000_set_current_function. */
36668 static GTY(()) tree rs6000_previous_fndecl;
36670 /* Restore target's globals from NEW_TREE and invalidate the
36671 rs6000_previous_fndecl cache. */
36673 static void
36674 rs6000_activate_target_options (tree new_tree)
36676 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36677 if (TREE_TARGET_GLOBALS (new_tree))
36678 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36679 else if (new_tree == target_option_default_node)
36680 restore_target_globals (&default_target_globals);
36681 else
36682 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36683 rs6000_previous_fndecl = NULL_TREE;
36686 /* Establish appropriate back-end context for processing the function
36687 FNDECL. The argument might be NULL to indicate processing at top
36688 level, outside of any function scope. */
36689 static void
36690 rs6000_set_current_function (tree fndecl)
36692 if (TARGET_DEBUG_TARGET)
36694 fprintf (stderr, "\n==================== rs6000_set_current_function");
36696 if (fndecl)
36697 fprintf (stderr, ", fndecl %s (%p)",
36698 (DECL_NAME (fndecl)
36699 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36700 : "<unknown>"), (void *)fndecl);
36702 if (rs6000_previous_fndecl)
36703 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36705 fprintf (stderr, "\n");
36708 /* Only change the context if the function changes. This hook is called
36709 several times in the course of compiling a function, and we don't want to
36710 slow things down too much or call target_reinit when it isn't safe. */
36711 if (fndecl == rs6000_previous_fndecl)
36712 return;
36714 tree old_tree;
36715 if (rs6000_previous_fndecl == NULL_TREE)
36716 old_tree = target_option_current_node;
36717 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36718 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36719 else
36720 old_tree = target_option_default_node;
36722 tree new_tree;
36723 if (fndecl == NULL_TREE)
36725 if (old_tree != target_option_current_node)
36726 new_tree = target_option_current_node;
36727 else
36728 new_tree = NULL_TREE;
36730 else
36732 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36733 if (new_tree == NULL_TREE)
36734 new_tree = target_option_default_node;
36737 if (TARGET_DEBUG_TARGET)
36739 if (new_tree)
36741 fprintf (stderr, "\nnew fndecl target specific options:\n");
36742 debug_tree (new_tree);
36745 if (old_tree)
36747 fprintf (stderr, "\nold fndecl target specific options:\n");
36748 debug_tree (old_tree);
36751 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36752 fprintf (stderr, "--------------------\n");
36755 if (new_tree && old_tree != new_tree)
36756 rs6000_activate_target_options (new_tree);
36758 if (fndecl)
36759 rs6000_previous_fndecl = fndecl;
36763 /* Save the current options */
36765 static void
36766 rs6000_function_specific_save (struct cl_target_option *ptr,
36767 struct gcc_options *opts)
36769 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36770 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36773 /* Restore the current options */
36775 static void
36776 rs6000_function_specific_restore (struct gcc_options *opts,
36777 struct cl_target_option *ptr)
36780 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36781 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36782 (void) rs6000_option_override_internal (false);
36785 /* Print the current options */
36787 static void
36788 rs6000_function_specific_print (FILE *file, int indent,
36789 struct cl_target_option *ptr)
36791 rs6000_print_isa_options (file, indent, "Isa options set",
36792 ptr->x_rs6000_isa_flags);
36794 rs6000_print_isa_options (file, indent, "Isa options explicit",
36795 ptr->x_rs6000_isa_flags_explicit);
36798 /* Helper function to print the current isa or misc options on a line. */
36800 static void
36801 rs6000_print_options_internal (FILE *file,
36802 int indent,
36803 const char *string,
36804 HOST_WIDE_INT flags,
36805 const char *prefix,
36806 const struct rs6000_opt_mask *opts,
36807 size_t num_elements)
36809 size_t i;
36810 size_t start_column = 0;
36811 size_t cur_column;
36812 size_t max_column = 120;
36813 size_t prefix_len = strlen (prefix);
36814 size_t comma_len = 0;
36815 const char *comma = "";
36817 if (indent)
36818 start_column += fprintf (file, "%*s", indent, "");
36820 if (!flags)
36822 fprintf (file, DEBUG_FMT_S, string, "<none>");
36823 return;
36826 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36828 /* Print the various mask options. */
36829 cur_column = start_column;
36830 for (i = 0; i < num_elements; i++)
36832 bool invert = opts[i].invert;
36833 const char *name = opts[i].name;
36834 const char *no_str = "";
36835 HOST_WIDE_INT mask = opts[i].mask;
36836 size_t len = comma_len + prefix_len + strlen (name);
36838 if (!invert)
36840 if ((flags & mask) == 0)
36842 no_str = "no-";
36843 len += sizeof ("no-") - 1;
36846 flags &= ~mask;
36849 else
36851 if ((flags & mask) != 0)
36853 no_str = "no-";
36854 len += sizeof ("no-") - 1;
36857 flags |= mask;
36860 cur_column += len;
36861 if (cur_column > max_column)
36863 fprintf (file, ", \\\n%*s", (int)start_column, "");
36864 cur_column = start_column + len;
36865 comma = "";
36868 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36869 comma = ", ";
36870 comma_len = sizeof (", ") - 1;
36873 fputs ("\n", file);
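/* With -mdebug=target, the output of the function above looks roughly like
   (illustrative values, not real flag bits):

     Isa options set = 0x0000000440800425 -maltivec, -mcmpb, -mfprnd, \
                       -mhard-float, -mno-power8-vector, ...

   Lines longer than max_column are continued with a backslash and
   re-aligned under the column where the first option started.  */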
36876 /* Helper function to print the current isa options on a line. */
36878 static void
36879 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36880 HOST_WIDE_INT flags)
36882 rs6000_print_options_internal (file, indent, string, flags, "-m",
36883 &rs6000_opt_masks[0],
36884 ARRAY_SIZE (rs6000_opt_masks));
36887 static void
36888 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36889 HOST_WIDE_INT flags)
36891 rs6000_print_options_internal (file, indent, string, flags, "",
36892 &rs6000_builtin_mask_names[0],
36893 ARRAY_SIZE (rs6000_builtin_mask_names));
36896 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
36897 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
36898 -mupper-regs-df, etc.).
36900 If the user used -mno-power8-vector, we need to turn off all of the implicit
36901 ISA 2.07 and 3.0 options that relate to the vector unit.
36903 If the user used -mno-power9-vector, we need to turn off all of the implicit
36904 ISA 3.0 options that relate to the vector unit.
36906 This function does not handle explicit options such as the user specifying
36907 -mdirect-move. These are handled in rs6000_option_override_internal, and
36908 the appropriate error is given if needed.
36910 We return a mask of all of the implicit options that should not be enabled
36911 by default. */
36913 static HOST_WIDE_INT
36914 rs6000_disable_incompatible_switches (void)
36916 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
36917 size_t i, j;
36919 static const struct {
36920 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
36921 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
36922 const char *const name; /* name of the switch. */
36923 } flags[] = {
36924 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
36925 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
36926 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
36929 for (i = 0; i < ARRAY_SIZE (flags); i++)
36931 HOST_WIDE_INT no_flag = flags[i].no_flag;
36933 if ((rs6000_isa_flags & no_flag) == 0
36934 && (rs6000_isa_flags_explicit & no_flag) != 0)
36936 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
36937 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
36938 & rs6000_isa_flags
36939 & dep_flags);
36941 if (set_flags)
36943 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
36944 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
36946 set_flags &= ~rs6000_opt_masks[j].mask;
36947 error ("%<-mno-%s%> turns off %<-m%s%>",
36948 flags[i].name,
36949 rs6000_opt_masks[j].name);
36952 gcc_assert (!set_flags);
36955 rs6000_isa_flags &= ~dep_flags;
36956 ignore_masks |= no_flag | dep_flags;
36960 return ignore_masks;
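/* As an illustration: with -mcpu=power9 -mno-vsx, the implicit VSX
   dependents (e.g. -mpower8-vector, -mpower9-vector, -mdirect-move) are
   cleared from rs6000_isa_flags here and added to the returned ignore
   mask, so that later option processing does not turn them back on.  */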
36964 /* Helper function for printing the function name when debugging. */
36966 static const char *
36967 get_decl_name (tree fn)
36969 tree name;
36971 if (!fn)
36972 return "<null>";
36974 name = DECL_NAME (fn);
36975 if (!name)
36976 return "<no-name>";
36978 return IDENTIFIER_POINTER (name);
36981 /* Return the clone id of the target we are compiling code for in a target
36982 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
36983 the priority list for the target clones (ordered from lowest to
36984 highest). */
36986 static int
36987 rs6000_clone_priority (tree fndecl)
36989 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36990 HOST_WIDE_INT isa_masks;
36991 int ret = CLONE_DEFAULT;
36992 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
36993 const char *attrs_str = NULL;
36995 attrs = TREE_VALUE (TREE_VALUE (attrs));
36996 attrs_str = TREE_STRING_POINTER (attrs);
36998 /* Return priority zero for the default function. Return the ISA needed for the
36999 function if it is not the default. */
37000 if (strcmp (attrs_str, "default") != 0)
37002 if (fn_opts == NULL_TREE)
37003 fn_opts = target_option_default_node;
37005 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37006 isa_masks = rs6000_isa_flags;
37007 else
37008 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37010 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37011 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37012 break;
37015 if (TARGET_DEBUG_TARGET)
37016 fprintf (stderr, "rs6000_clone_priority (%s) => %d\n",
37017 get_decl_name (fndecl), ret);
37019 return ret;
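/* For reference, the clone priorities come from functions such as
   (illustrative):

     __attribute__ ((target_clones ("cpu=power9", "default")))
     long foo (long a, long b) { return a % b; }

   where the "cpu=power9" version gets a higher priority than "default"
   and is preferred when the hardware supports ISA 3.0.  */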
37022 /* This compares the priority of target features in function DECL1 and DECL2.
37023 It returns a positive value if DECL1 has higher priority, a negative value
37024 if DECL2 has higher priority, and 0 if they are the same. Note, priorities
37025 are ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37027 static int
37028 rs6000_compare_version_priority (tree decl1, tree decl2)
37030 int priority1 = rs6000_clone_priority (decl1);
37031 int priority2 = rs6000_clone_priority (decl2);
37032 int ret = priority1 - priority2;
37034 if (TARGET_DEBUG_TARGET)
37035 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37036 get_decl_name (decl1), get_decl_name (decl2), ret);
37038 return ret;
37041 /* Make a dispatcher declaration for the multi-versioned function DECL.
37042 Calls to DECL will be replaced with calls to the dispatcher
37043 by the front-end. Returns the decl of the dispatcher function. */
37045 static tree
37046 rs6000_get_function_versions_dispatcher (void *decl)
37048 tree fn = (tree) decl;
37049 struct cgraph_node *node = NULL;
37050 struct cgraph_node *default_node = NULL;
37051 struct cgraph_function_version_info *node_v = NULL;
37052 struct cgraph_function_version_info *first_v = NULL;
37054 tree dispatch_decl = NULL;
37056 struct cgraph_function_version_info *default_version_info = NULL;
37057 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37059 if (TARGET_DEBUG_TARGET)
37060 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37061 get_decl_name (fn));
37063 node = cgraph_node::get (fn);
37064 gcc_assert (node != NULL);
37066 node_v = node->function_version ();
37067 gcc_assert (node_v != NULL);
37069 if (node_v->dispatcher_resolver != NULL)
37070 return node_v->dispatcher_resolver;
37072 /* Find the default version and make it the first node. */
37073 first_v = node_v;
37074 /* Go to the beginning of the chain. */
37075 while (first_v->prev != NULL)
37076 first_v = first_v->prev;
37078 default_version_info = first_v;
37079 while (default_version_info != NULL)
37081 const tree decl2 = default_version_info->this_node->decl;
37082 if (is_function_default_version (decl2))
37083 break;
37084 default_version_info = default_version_info->next;
37087 /* If there is no default node, just return NULL. */
37088 if (default_version_info == NULL)
37089 return NULL;
37091 /* Make default info the first node. */
37092 if (first_v != default_version_info)
37094 default_version_info->prev->next = default_version_info->next;
37095 if (default_version_info->next)
37096 default_version_info->next->prev = default_version_info->prev;
37097 first_v->prev = default_version_info;
37098 default_version_info->next = first_v;
37099 default_version_info->prev = NULL;
37102 default_node = default_version_info->this_node;
37104 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37105 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37106 "target_clones attribute needs GLIBC (2.23 and newer) that "
37107 "exports hardware capability bits");
37108 #else
37110 if (targetm.has_ifunc_p ())
37112 struct cgraph_function_version_info *it_v = NULL;
37113 struct cgraph_node *dispatcher_node = NULL;
37114 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37116 /* Right now, the dispatching is done via ifunc. */
37117 dispatch_decl = make_dispatcher_decl (default_node->decl);
37119 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37120 gcc_assert (dispatcher_node != NULL);
37121 dispatcher_node->dispatcher_function = 1;
37122 dispatcher_version_info
37123 = dispatcher_node->insert_new_function_version ();
37124 dispatcher_version_info->next = default_version_info;
37125 dispatcher_node->definition = 1;
37127 /* Set the dispatcher for all the versions. */
37128 it_v = default_version_info;
37129 while (it_v != NULL)
37131 it_v->dispatcher_resolver = dispatch_decl;
37132 it_v = it_v->next;
37135 else
37137 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37138 "multiversioning needs ifunc which is not supported "
37139 "on this target");
37141 #endif
37143 return dispatch_decl;
37146 /* Make the resolver function decl to dispatch the versions of a multi-
37147 versioned function, DEFAULT_DECL. Create an empty basic block in the
37148 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37149 function. */
37151 static tree
37152 make_resolver_func (const tree default_decl,
37153 const tree dispatch_decl,
37154 basic_block *empty_bb)
37156 /* Make the resolver function static. The resolver function returns
37157 void *. */
37158 tree decl_name = clone_function_name (default_decl, "resolver");
37159 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37160 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37161 tree decl = build_fn_decl (resolver_name, type);
37162 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37164 DECL_NAME (decl) = decl_name;
37165 TREE_USED (decl) = 1;
37166 DECL_ARTIFICIAL (decl) = 1;
37167 DECL_IGNORED_P (decl) = 0;
37168 TREE_PUBLIC (decl) = 0;
37169 DECL_UNINLINABLE (decl) = 1;
37171 /* Resolver is not external, body is generated. */
37172 DECL_EXTERNAL (decl) = 0;
37173 DECL_EXTERNAL (dispatch_decl) = 0;
37175 DECL_CONTEXT (decl) = NULL_TREE;
37176 DECL_INITIAL (decl) = make_node (BLOCK);
37177 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37179 /* Build result decl and add to function_decl. */
37180 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37181 DECL_ARTIFICIAL (t) = 1;
37182 DECL_IGNORED_P (t) = 1;
37183 DECL_RESULT (decl) = t;
37185 gimplify_function_tree (decl);
37186 push_cfun (DECL_STRUCT_FUNCTION (decl));
37187 *empty_bb = init_lowered_empty_function (decl, false,
37188 profile_count::uninitialized ());
37190 cgraph_node::add_new_function (decl, true);
37191 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37193 pop_cfun ();
37195 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37196 DECL_ATTRIBUTES (dispatch_decl)
37197 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37199 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37201 return decl;
37204 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37205 return a pointer to VERSION_DECL if we are running on a machine that
37206 supports the hardware architecture bits indexed by CLONE_ISA. This function will
37207 be called during version dispatch to decide which function version to
37208 execute. It returns the basic block at the end, to which more conditions
37209 can be added. */
37211 static basic_block
37212 add_condition_to_bb (tree function_decl, tree version_decl,
37213 int clone_isa, basic_block new_bb)
37215 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37217 gcc_assert (new_bb != NULL);
37218 gimple_seq gseq = bb_seq (new_bb);
37221 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37222 build_fold_addr_expr (version_decl));
37223 tree result_var = create_tmp_var (ptr_type_node);
37224 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37225 gimple *return_stmt = gimple_build_return (result_var);
37227 if (clone_isa == CLONE_DEFAULT)
37229 gimple_seq_add_stmt (&gseq, convert_stmt);
37230 gimple_seq_add_stmt (&gseq, return_stmt);
37231 set_bb_seq (new_bb, gseq);
37232 gimple_set_bb (convert_stmt, new_bb);
37233 gimple_set_bb (return_stmt, new_bb);
37234 pop_cfun ();
37235 return new_bb;
37238 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37239 tree cond_var = create_tmp_var (bool_int_type_node);
37240 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37241 const char *arg_str = rs6000_clone_map[clone_isa].name;
37242 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37243 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37244 gimple_call_set_lhs (call_cond_stmt, cond_var);
37246 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37247 gimple_set_bb (call_cond_stmt, new_bb);
37248 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37250 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37251 NULL_TREE, NULL_TREE);
37252 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37253 gimple_set_bb (if_else_stmt, new_bb);
37254 gimple_seq_add_stmt (&gseq, if_else_stmt);
37256 gimple_seq_add_stmt (&gseq, convert_stmt);
37257 gimple_seq_add_stmt (&gseq, return_stmt);
37258 set_bb_seq (new_bb, gseq);
37260 basic_block bb1 = new_bb;
37261 edge e12 = split_block (bb1, if_else_stmt);
37262 basic_block bb2 = e12->dest;
37263 e12->flags &= ~EDGE_FALLTHRU;
37264 e12->flags |= EDGE_TRUE_VALUE;
37266 edge e23 = split_block (bb2, return_stmt);
37267 gimple_set_bb (convert_stmt, bb2);
37268 gimple_set_bb (return_stmt, bb2);
37270 basic_block bb3 = e23->dest;
37271 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37273 remove_edge (e23);
37274 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37276 pop_cfun ();
37277 return bb3;
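/* The GIMPLE added for one non-default version is roughly (sketch; the
   clone symbol name is hypothetical):

     cond = __builtin_cpu_supports ("arch_3_00");
     if (cond != 0)
       return (void *) foo_power9;

   with the false edge falling through to the check for the next lower
   priority clone.  */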
37280 /* This function generates the dispatch function for multi-versioned functions.
37281 DISPATCH_DECL is the function which will contain the dispatch logic.
37282 FNDECLS are the function choices for dispatch, and is a tree chain.
37283 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37284 code is generated. */
37286 static int
37287 dispatch_function_versions (tree dispatch_decl,
37288 void *fndecls_p,
37289 basic_block *empty_bb)
37291 int ix;
37292 tree ele;
37293 vec<tree> *fndecls;
37294 tree clones[CLONE_MAX];
37296 if (TARGET_DEBUG_TARGET)
37297 fputs ("dispatch_function_versions, top\n", stderr);
37299 gcc_assert (dispatch_decl != NULL
37300 && fndecls_p != NULL
37301 && empty_bb != NULL);
37303 /* fndecls_p is actually a vector. */
37304 fndecls = static_cast<vec<tree> *> (fndecls_p);
37306 /* At least one more version other than the default. */
37307 gcc_assert (fndecls->length () >= 2);
37309 /* The first version in the vector is the default decl. */
37310 memset ((void *) clones, '\0', sizeof (clones));
37311 clones[CLONE_DEFAULT] = (*fndecls)[0];
37313 /* On the PowerPC, we do not need to call __builtin_cpu_init; it is a NOP
37314 here (on x86_64 it is not). The builtin function
37315 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37316 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37317 to insert the code here to do the call. */
37319 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37321 int priority = rs6000_clone_priority (ele);
37322 if (!clones[priority])
37323 clones[priority] = ele;
37326 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37327 if (clones[ix])
37329 if (TARGET_DEBUG_TARGET)
37330 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37331 ix, get_decl_name (clones[ix]));
37333 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37334 *empty_bb);
37337 return 0;
37340 /* Generate the dispatching code body to dispatch multi-versioned function
37341 DECL. The target hook is called to process the "target" attributes and
37342 provide the code to dispatch the right function at run-time. NODE points
37343 to the dispatcher decl whose body will be created. */
37345 static tree
37346 rs6000_generate_version_dispatcher_body (void *node_p)
37348 tree resolver;
37349 basic_block empty_bb;
37350 struct cgraph_node *node = (cgraph_node *) node_p;
37351 struct cgraph_function_version_info *ninfo = node->function_version ();
37353 if (ninfo->dispatcher_resolver)
37354 return ninfo->dispatcher_resolver;
37356 /* node is going to be an alias, so remove the finalized bit. */
37357 node->definition = false;
37359 /* The first version in the chain corresponds to the default version. */
37360 ninfo->dispatcher_resolver = resolver
37361 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37363 if (TARGET_DEBUG_TARGET)
37364 fprintf (stderr, "rs6000_generate_version_dispatcher_body, %s\n",
37365 get_decl_name (resolver));
37367 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37368 auto_vec<tree, 2> fn_ver_vec;
37370 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37371 vinfo;
37372 vinfo = vinfo->next)
37374 struct cgraph_node *version = vinfo->this_node;
37375 /* Check for virtual functions here again, as by this time it should
37376 have been determined if this function needs a vtable index or
37377 not. This happens for methods in derived classes that override
37378 virtual methods in base classes but are not explicitly marked as
37379 virtual. */
37380 if (DECL_VINDEX (version->decl))
37381 sorry ("Virtual function multiversioning not supported");
37383 fn_ver_vec.safe_push (version->decl);
37386 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37387 cgraph_edge::rebuild_edges ();
37388 pop_cfun ();
37389 return resolver;
37393 /* Hook to determine if one function can safely inline another. */
37395 static bool
37396 rs6000_can_inline_p (tree caller, tree callee)
37398 bool ret = false;
37399 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37400 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37402 /* If callee has no option attributes, then it is ok to inline. */
37403 if (!callee_tree)
37404 ret = true;
37406 /* If caller has no option attributes, but callee does then it is not ok to
37407 inline. */
37408 else if (!caller_tree)
37409 ret = false;
37411 else
37413 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37414 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37416 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37417 can inline an altivec function but a non-vsx function can't inline a
37418 vsx function. */
37419 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37420 == callee_opts->x_rs6000_isa_flags)
37421 ret = true;
37424 if (TARGET_DEBUG_TARGET)
37425 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37426 get_decl_name (caller), get_decl_name (callee),
37427 (ret ? "can" : "cannot"));
37429 return ret;
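/* Example of the subset rule (illustrative): a callee compiled without VSX
   (e.g. __attribute__ ((target ("no-vsx")))) can be inlined into a caller
   compiled with -mvsx, since the callee's ISA flags are a subset of the
   caller's; a VSX callee cannot be inlined into a non-VSX caller.  */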
37432 /* Allocate a stack temp and fix up the address so it meets the particular
37433 memory requirements (either offsettable or REG+REG addressing). */
37435 rtx
37436 rs6000_allocate_stack_temp (machine_mode mode,
37437 bool offsettable_p,
37438 bool reg_reg_p)
37440 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37441 rtx addr = XEXP (stack, 0);
37442 int strict_p = reload_completed;
37444 if (!legitimate_indirect_address_p (addr, strict_p))
37446 if (offsettable_p
37447 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37448 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37450 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37451 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37454 return stack;
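/* Typical use (illustrative): allocate a DFmode stack slot whose address
   is known to be valid for D-form (offsettable) addressing:

     rtx slot = rs6000_allocate_stack_temp (DFmode, true, false);  */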
37457 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
37458 to such a form to deal with memory reference instructions like STFIWX that
37459 only take reg+reg addressing. */
37461 rtx
37462 rs6000_address_for_fpconvert (rtx x)
37464 rtx addr;
37466 gcc_assert (MEM_P (x));
37467 addr = XEXP (x, 0);
37468 if (! legitimate_indirect_address_p (addr, reload_completed)
37469 && ! legitimate_indexed_address_p (addr, reload_completed))
37471 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37473 rtx reg = XEXP (addr, 0);
37474 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37475 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37476 gcc_assert (REG_P (reg));
37477 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37478 addr = reg;
37480 else if (GET_CODE (addr) == PRE_MODIFY)
37482 rtx reg = XEXP (addr, 0);
37483 rtx expr = XEXP (addr, 1);
37484 gcc_assert (REG_P (reg));
37485 gcc_assert (GET_CODE (expr) == PLUS);
37486 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37487 addr = reg;
37490 x = replace_equiv_address (x, copy_addr_to_reg (addr));
37493 return x;
37496 /* Given a memory reference, if it is not in the form for altivec memory
37497 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
37498 convert to the altivec format. */
37500 rtx
37501 rs6000_address_for_altivec (rtx x)
37503 gcc_assert (MEM_P (x));
37504 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
37506 rtx addr = XEXP (x, 0);
37508 if (!legitimate_indexed_address_p (addr, reload_completed)
37509 && !legitimate_indirect_address_p (addr, reload_completed))
37510 addr = copy_to_mode_reg (Pmode, addr);
37512 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
37513 x = change_address (x, GET_MODE (x), addr);
37516 return x;
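/* E.g. (sketch): an address such as (plus r9 (const_int 32)) is neither
   indirect nor indexed, so it is copied into a register and then wrapped
   as (and (reg) (const_int -16)), matching the 16-byte alignment that the
   AltiVec lvx/stvx instructions enforce by ignoring the low 4 bits.  */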
37519 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37521 On the RS/6000, all integer constants are acceptable, though most won't
37522 be valid for particular insns. Only easy FP constants are acceptable. */
37524 static bool
37525 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37527 if (TARGET_ELF && tls_referenced_p (x))
37528 return false;
37530 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
37531 || GET_MODE (x) == VOIDmode
37532 || (TARGET_POWERPC64 && mode == DImode)
37533 || easy_fp_constant (x, mode)
37534 || easy_vector_constant (x, mode));
37538 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37540 static bool
37541 chain_already_loaded (rtx_insn *last)
37543 for (; last != NULL; last = PREV_INSN (last))
37545 if (NONJUMP_INSN_P (last))
37547 rtx patt = PATTERN (last);
37549 if (GET_CODE (patt) == SET)
37551 rtx lhs = XEXP (patt, 0);
37553 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37554 return true;
37558 return false;
37561 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37563 void
37564 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37566 const bool direct_call_p
37567 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
37568 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37569 rtx toc_load = NULL_RTX;
37570 rtx toc_restore = NULL_RTX;
37571 rtx func_addr;
37572 rtx abi_reg = NULL_RTX;
37573 rtx call[4];
37574 int n_call;
37575 rtx insn;
37577 /* Handle longcall attributes. */
37578 if (INTVAL (cookie) & CALL_LONG)
37579 func_desc = rs6000_longcall_ref (func_desc);
37581 /* Handle indirect calls. */
37582 if (GET_CODE (func_desc) != SYMBOL_REF
37583 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
37585 /* Save the TOC into its reserved slot before the call,
37586 and prepare to restore it after the call. */
37587 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37588 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37589 rtx stack_toc_mem = gen_frame_mem (Pmode,
37590 gen_rtx_PLUS (Pmode, stack_ptr,
37591 stack_toc_offset));
37592 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37593 gen_rtvec (1, stack_toc_offset),
37594 UNSPEC_TOCSLOT);
37595 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37597 /* Can we optimize saving the TOC in the prologue or
37598 do we need to do it at every call? */
37599 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37600 cfun->machine->save_toc_in_prologue = true;
37601 else
37603 MEM_VOLATILE_P (stack_toc_mem) = 1;
37604 emit_move_insn (stack_toc_mem, toc_reg);
37607 if (DEFAULT_ABI == ABI_ELFv2)
37609 /* A function pointer in the ELFv2 ABI is just a plain address, but
37610 the ABI requires it to be loaded into r12 before the call. */
37611 func_addr = gen_rtx_REG (Pmode, 12);
37612 emit_move_insn (func_addr, func_desc);
37613 abi_reg = func_addr;
37615 else
37617 /* A function pointer under AIX is a pointer to a data area whose
37618 first word contains the actual address of the function, whose
37619 second word contains a pointer to its TOC, and whose third word
37620 contains a value to place in the static chain register (r11).
37621 Note that if we load the static chain, our "trampoline" need
37622 not have any executable code. */
37624 /* Load up address of the actual function. */
37625 func_desc = force_reg (Pmode, func_desc);
37626 func_addr = gen_reg_rtx (Pmode);
37627 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
37629 /* Prepare to load the TOC of the called function. Note that the
37630 TOC load must happen immediately before the actual call so
37631 that unwinding the TOC registers works correctly. See the
37632 comment in frob_update_context. */
37633 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37634 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37635 gen_rtx_PLUS (Pmode, func_desc,
37636 func_toc_offset));
37637 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37639 /* If we have a static chain, load it up. But, if the call was
37640 originally direct, the 3rd word has not been written since no
37641 trampoline has been built, so we ought not to load it, lest we
37642 overwrite a static chain value. */
37643 if (!direct_call_p
37644 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37645 && !chain_already_loaded (get_current_sequence ()->next->last))
37647 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37648 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37649 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37650 gen_rtx_PLUS (Pmode, func_desc,
37651 func_sc_offset));
37652 emit_move_insn (sc_reg, func_sc_mem);
37653 abi_reg = sc_reg;
37657 else
37659 /* Direct calls use the TOC: for local calls, the callee will
37660 assume the TOC register is set; for non-local calls, the
37661 PLT stub needs the TOC register. */
37662 abi_reg = toc_reg;
37663 func_addr = func_desc;
37666 /* Create the call. */
37667 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
37668 if (value != NULL_RTX)
37669 call[0] = gen_rtx_SET (value, call[0]);
37670 n_call = 1;
37672 if (toc_load)
37673 call[n_call++] = toc_load;
37674 if (toc_restore)
37675 call[n_call++] = toc_restore;
37677 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
37679 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37680 insn = emit_call_insn (insn);
37682 /* Mention all registers defined by the ABI to hold information
37683 as uses in CALL_INSN_FUNCTION_USAGE. */
37684 if (abi_reg)
37685 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
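/* For an indirect call under the 64-bit AIX ABI, the code emitted above
   amounts to roughly (sketch; register numbers for illustration only):

     std 2,40(1)     # save TOC in its reserved stack slot
     ld 12,0(9)      # function address from the descriptor
     ld 2,8(9)       # callee's TOC pointer
     ld 11,16(9)     # static chain, if needed
     mtctr 12
     bctrl
     ld 2,40(1)      # restore our TOC after the call  */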
37688 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37690 void
37691 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37693 rtx call[2];
37694 rtx insn;
37696 gcc_assert (INTVAL (cookie) == 0);
37698 /* Create the call. */
37699 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
37700 if (value != NULL_RTX)
37701 call[0] = gen_rtx_SET (value, call[0]);
37703 call[1] = simple_return_rtx;
37705 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37706 insn = emit_call_insn (insn);
37708 /* Note use of the TOC register. */
37709 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37712 /* Return whether we need to always update the saved TOC pointer when we update
37713 the stack pointer. */
37715 static bool
37716 rs6000_save_toc_in_prologue_p (void)
37718 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
37721 #ifdef HAVE_GAS_HIDDEN
37722 # define USE_HIDDEN_LINKONCE 1
37723 #else
37724 # define USE_HIDDEN_LINKONCE 0
37725 #endif
37727 /* Fills in the label name that should be used for a 476 link stack thunk. */
37729 void
37730 get_ppc476_thunk_name (char name[32])
37732 gcc_assert (TARGET_LINK_STACK);
37734 if (USE_HIDDEN_LINKONCE)
37735 sprintf (name, "__ppc476.get_thunk");
37736 else
37737 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
37740 /* This function emits the simple thunk routine that is used to preserve
37741 the link stack on the 476 cpu. */
37743 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
37744 static void
37745 rs6000_code_end (void)
37747 char name[32];
37748 tree decl;
37750 if (!TARGET_LINK_STACK)
37751 return;
37753 get_ppc476_thunk_name (name);
37755 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
37756 build_function_type_list (void_type_node, NULL_TREE));
37757 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
37758 NULL_TREE, void_type_node);
37759 TREE_PUBLIC (decl) = 1;
37760 TREE_STATIC (decl) = 1;
37762 #if RS6000_WEAK
37763 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
37765 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
37766 targetm.asm_out.unique_section (decl, 0);
37767 switch_to_section (get_named_section (decl, NULL, 0));
37768 DECL_WEAK (decl) = 1;
37769 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
37770 targetm.asm_out.globalize_label (asm_out_file, name);
37771 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
37772 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
37774 else
37775 #endif
37777 switch_to_section (text_section);
37778 ASM_OUTPUT_LABEL (asm_out_file, name);
37781 DECL_INITIAL (decl) = make_node (BLOCK);
37782 current_function_decl = decl;
37783 allocate_struct_function (decl, false);
37784 init_function_start (decl);
37785 first_function_block_is_cold = false;
37786 /* Make sure unwind info is emitted for the thunk if needed. */
37787 final_start_function (emit_barrier (), asm_out_file, 1);
37789 fputs ("\tblr\n", asm_out_file);
37791 final_end_function ();
37792 init_insn_lengths ();
37793 free_after_compilation (cfun);
37794 set_cfun (NULL);
37795 current_function_decl = NULL;
37798 /* Add r30 to hard reg set if the prologue sets it up and it is not
37799 pic_offset_table_rtx. */
37801 static void
37802 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
37804 if (!TARGET_SINGLE_PIC_BASE
37805 && TARGET_TOC
37806 && TARGET_MINIMAL_TOC
37807 && !constant_pool_empty_p ())
37808 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
37809 if (cfun->machine->split_stack_argp_used)
37810 add_to_hard_reg_set (&set->set, Pmode, 12);
37814 /* Helper function for rs6000_split_logical to emit a logical instruction after
37815 splitting the operation into single GPR registers.
37817 DEST is the destination register.
37818 OP1 and OP2 are the input source registers.
37819 CODE is the base operation (AND, IOR, XOR, NOT).
37820 MODE is the machine mode.
37821 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37822 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37823 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37825 static void
37826 rs6000_split_logical_inner (rtx dest,
37827 rtx op1,
37828 rtx op2,
37829 enum rtx_code code,
37830 machine_mode mode,
37831 bool complement_final_p,
37832 bool complement_op1_p,
37833 bool complement_op2_p)
37835 rtx bool_rtx;
37837 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37838 if (op2 && GET_CODE (op2) == CONST_INT
37839 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
37840 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37842 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
37843 HOST_WIDE_INT value = INTVAL (op2) & mask;
37845 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37846 if (code == AND)
37848 if (value == 0)
37850 emit_insn (gen_rtx_SET (dest, const0_rtx));
37851 return;
37854 else if (value == mask)
37856 if (!rtx_equal_p (dest, op1))
37857 emit_insn (gen_rtx_SET (dest, op1));
37858 return;
37862 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
37863 into separate ORI/ORIS or XORI/XORIS instructions. */
37864 else if (code == IOR || code == XOR)
37866 if (value == 0)
37868 if (!rtx_equal_p (dest, op1))
37869 emit_insn (gen_rtx_SET (dest, op1));
37870 return;
37875 if (code == AND && mode == SImode
37876 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37878 emit_insn (gen_andsi3 (dest, op1, op2));
37879 return;
37882 if (complement_op1_p)
37883 op1 = gen_rtx_NOT (mode, op1);
37885 if (complement_op2_p)
37886 op2 = gen_rtx_NOT (mode, op2);
37888 /* For canonical RTL, if only one arm is inverted it is the first. */
37889 if (!complement_op1_p && complement_op2_p)
37890 std::swap (op1, op2);
37892 bool_rtx = ((code == NOT)
37893 ? gen_rtx_NOT (mode, op1)
37894 : gen_rtx_fmt_ee (code, mode, op1, op2));
37896 if (complement_final_p)
37897 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
37899 emit_insn (gen_rtx_SET (dest, bool_rtx));
37902 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
37903 operations are split immediately during RTL generation to allow for more
37904 optimizations of the AND/IOR/XOR.
37906 OPERANDS is an array containing the destination and two input operands.
37907 CODE is the base operation (AND, IOR, XOR, NOT).
37908 MODE is the machine mode.
37909 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37910 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37911 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37915 static void
37916 rs6000_split_logical_di (rtx operands[3],
37917 enum rtx_code code,
37918 bool complement_final_p,
37919 bool complement_op1_p,
37920 bool complement_op2_p)
37922 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
37923 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
37924 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
37925 enum hi_lo { hi = 0, lo = 1 };
37926 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
37927 size_t i;
37929 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
37930 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
37931 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
37932 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
37934 if (code == NOT)
37935 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
37936 else
37938 if (GET_CODE (operands[2]) != CONST_INT)
37940 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
37941 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
37943 else
37945 HOST_WIDE_INT value = INTVAL (operands[2]);
37946 HOST_WIDE_INT value_hi_lo[2];
37948 gcc_assert (!complement_final_p);
37949 gcc_assert (!complement_op1_p);
37950 gcc_assert (!complement_op2_p);
37952 value_hi_lo[hi] = value >> 32;
37953 value_hi_lo[lo] = value & lower_32bits;
37955 for (i = 0; i < 2; i++)
37957 HOST_WIDE_INT sub_value = value_hi_lo[i];
37959 if (sub_value & sign_bit)
37960 sub_value |= upper_32bits;
37962 op2_hi_lo[i] = GEN_INT (sub_value);
37964 /* If this is an AND instruction, check to see if we need to load
37965 the value in a register. */
37966 if (code == AND && sub_value != -1 && sub_value != 0
37967 && !and_operand (op2_hi_lo[i], SImode))
37968 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
37973 for (i = 0; i < 2; i++)
37975 /* Split large IOR/XOR operations. */
37976 if ((code == IOR || code == XOR)
37977 && GET_CODE (op2_hi_lo[i]) == CONST_INT
37978 && !complement_final_p
37979 && !complement_op1_p
37980 && !complement_op2_p
37981 && !logical_const_operand (op2_hi_lo[i], SImode))
37983 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
37984 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
37985 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
37986 rtx tmp = gen_reg_rtx (SImode);
37988 /* Make sure the constant is sign extended. */
37989 if ((hi_16bits & sign_bit) != 0)
37990 hi_16bits |= upper_32bits;
37992 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
37993 code, SImode, false, false, false);
37995 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
37996 code, SImode, false, false, false);
37998 else
37999 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38000 code, SImode, complement_final_p,
38001 complement_op1_p, complement_op2_p);
38004 return;
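/* Example of the splitting (sketch): on a 32-bit target, a DImode
   "x ^= 0x12345678" is handled per 32-bit half:

     high word:  XOR with 0, i.e. just a move (or no insn if regs match)
     low word:   xoris 9,9,0x1234
                 xori  9,9,0x5678

   because 0x12345678 does not fit a single logical constant operand.  */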
38007 /* Split the insns that make up boolean operations operating on multiple GPR
38008 registers. The boolean MD patterns ensure that the inputs either are
38009 exactly the same as the output registers, or there is no overlap.
38011 OPERANDS is an array containing the destination and two input operands.
38012 CODE is the base operation (AND, IOR, XOR, NOT).
38013 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38014 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38015 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38017 void
38018 rs6000_split_logical (rtx operands[3],
38019 enum rtx_code code,
38020 bool complement_final_p,
38021 bool complement_op1_p,
38022 bool complement_op2_p)
38024 machine_mode mode = GET_MODE (operands[0]);
38025 machine_mode sub_mode;
38026 rtx op0, op1, op2;
38027 int sub_size, regno0, regno1, nregs, i;
38029 /* If this is DImode, use the specialized version that can run before
38030 register allocation. */
38031 if (mode == DImode && !TARGET_POWERPC64)
38033 rs6000_split_logical_di (operands, code, complement_final_p,
38034 complement_op1_p, complement_op2_p);
38035 return;
38038 op0 = operands[0];
38039 op1 = operands[1];
38040 op2 = (code == NOT) ? NULL_RTX : operands[2];
38041 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38042 sub_size = GET_MODE_SIZE (sub_mode);
38043 regno0 = REGNO (op0);
38044 regno1 = REGNO (op1);
38046 gcc_assert (reload_completed);
38047 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38048 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38050 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38051 gcc_assert (nregs > 1);
38053 if (op2 && REG_P (op2))
38054 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38056 for (i = 0; i < nregs; i++)
38058 int offset = i * sub_size;
38059 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38060 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38061 rtx sub_op2 = ((code == NOT)
38062 ? NULL_RTX
38063 : simplify_subreg (sub_mode, op2, mode, offset));
38065 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38066 complement_final_p, complement_op1_p,
38067 complement_op2_p);
38070 return;
38074 /* Return true if the peephole2 can combine an addis instruction with a load
38075 that uses an offset, so that the two instructions can be fused together
38076 on a power8. */
38078 bool
38079 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38080 rtx addis_value, /* addis value. */
38081 rtx target, /* target register that is loaded. */
38082 rtx mem) /* bottom part of the memory addr. */
38084 rtx addr;
38085 rtx base_reg;
38087 /* Validate arguments. */
38088 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38089 return false;
38091 if (!base_reg_operand (target, GET_MODE (target)))
38092 return false;
38094 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38095 return false;
38097 /* Allow sign/zero extension. */
38098 if (GET_CODE (mem) == ZERO_EXTEND
38099 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38100 mem = XEXP (mem, 0);
38102 if (!MEM_P (mem))
38103 return false;
38105 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38106 return false;
38108 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38109 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38110 return false;
38112 /* Validate that the register used to load the high value is either the
38113 register being loaded, or we can safely replace its use.
38115 This function is only called from the peephole2 pass and we assume that
38116 there are 2 instructions in the peephole (addis and load), so we want to
38117 check that the target register is not used in the memory address and that
38118 the register holding the addis result is dead after the peephole. */
38119 if (REGNO (addis_reg) != REGNO (target))
38121 if (reg_mentioned_p (target, mem))
38122 return false;
38124 if (!peep2_reg_dead_p (2, addis_reg))
38125 return false;
38127 /* If the target register being loaded is the stack pointer, we must
38128 avoid loading any other value into it, even temporarily. */
38129 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38130 return false;
38133 base_reg = XEXP (addr, 0);
38134 return REGNO (addis_reg) == REGNO (base_reg);
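/* When this predicate accepts, the peephole2 forms a fused pair such as
   (illustrative):

     addis 9,2,sym@toc@ha
     lwz 9,sym@toc@l(9)

   where the addis target, the load base, and the loaded register are the
   same GPR, as power8 fusion requires.  */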
38137 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38138 sequence. We adjust the addis register to use the target register. If the
38139 load sign extends, we emit a zero extending load followed by an explicit
38140 sign extension, since the fusion only covers zero extending loads.
38143 The operands are:
38144 operands[0] register set with addis (to be replaced with target)
38145 operands[1] value set via addis
38146 operands[2] target register being loaded
38147 operands[3] D-form memory reference using operands[0]. */
38149 void
38150 expand_fusion_gpr_load (rtx *operands)
38152 rtx addis_value = operands[1];
38153 rtx target = operands[2];
38154 rtx orig_mem = operands[3];
38155 rtx new_addr, new_mem, orig_addr, offset;
38156 enum rtx_code plus_or_lo_sum;
38157 machine_mode target_mode = GET_MODE (target);
38158 machine_mode extend_mode = target_mode;
38159 machine_mode ptr_mode = Pmode;
38160 enum rtx_code extend = UNKNOWN;
38162 if (GET_CODE (orig_mem) == ZERO_EXTEND
38163 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38165 extend = GET_CODE (orig_mem);
38166 orig_mem = XEXP (orig_mem, 0);
38167 target_mode = GET_MODE (orig_mem);
38170 gcc_assert (MEM_P (orig_mem));
38172 orig_addr = XEXP (orig_mem, 0);
38173 plus_or_lo_sum = GET_CODE (orig_addr);
38174 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38176 offset = XEXP (orig_addr, 1);
38177 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38178 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38180 if (extend != UNKNOWN)
38181 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38183 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38184 UNSPEC_FUSION_GPR);
38185 emit_insn (gen_rtx_SET (target, new_mem));
38187 if (extend == SIGN_EXTEND)
38189 int sub_off = ((BYTES_BIG_ENDIAN)
38190 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38191 : 0);
38192 rtx sign_reg
38193 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38195 emit_insn (gen_rtx_SET (target,
38196 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38199 return;
38202 /* Emit the addis instruction that will be part of a fused instruction
38203 sequence. */
38205 void
38206 emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
38207 const char *mode_name)
38209 rtx fuse_ops[10];
38210 char insn_template[80];
38211 const char *addis_str = NULL;
38212 const char *comment_str = ASM_COMMENT_START;
38214 if (*comment_str == ' ')
38215 comment_str++;
38217 /* Emit the addis instruction. */
38218 fuse_ops[0] = target;
38219 if (satisfies_constraint_L (addis_value))
38221 fuse_ops[1] = addis_value;
38222 addis_str = "lis %0,%v1";
38225 else if (GET_CODE (addis_value) == PLUS)
38227 rtx op0 = XEXP (addis_value, 0);
38228 rtx op1 = XEXP (addis_value, 1);
38230 if (REG_P (op0) && CONST_INT_P (op1)
38231 && satisfies_constraint_L (op1))
38233 fuse_ops[1] = op0;
38234 fuse_ops[2] = op1;
38235 addis_str = "addis %0,%1,%v2";
38239 else if (GET_CODE (addis_value) == HIGH)
38241 rtx value = XEXP (addis_value, 0);
38242 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38244 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38245 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38246 if (TARGET_ELF)
38247 addis_str = "addis %0,%2,%1@toc@ha";
38249 else if (TARGET_XCOFF)
38250 addis_str = "addis %0,%1@u(%2)";
38252 else
38253 gcc_unreachable ();
38256 else if (GET_CODE (value) == PLUS)
38258 rtx op0 = XEXP (value, 0);
38259 rtx op1 = XEXP (value, 1);
38261 if (GET_CODE (op0) == UNSPEC
38262 && XINT (op0, 1) == UNSPEC_TOCREL
38263 && CONST_INT_P (op1))
38265 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38266 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38267 fuse_ops[3] = op1;
38268 if (TARGET_ELF)
38269 addis_str = "addis %0,%2,%1+%3@toc@ha";
38271 else if (TARGET_XCOFF)
38272 addis_str = "addis %0,%1+%3@u(%2)";
38274 else
38275 gcc_unreachable ();
38279 else if (satisfies_constraint_L (value))
38281 fuse_ops[1] = value;
38282 addis_str = "lis %0,%v1";
38285 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38287 fuse_ops[1] = value;
38288 addis_str = "lis %0,%1@ha";
38292 if (!addis_str)
38293 fatal_insn ("Could not generate addis value for fusion", addis_value);
38295 sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
38296 comment, mode_name);
38297 output_asm_insn (insn_template, fuse_ops);
38300 /* Emit a D-form load or store instruction that is the second instruction
38301 of a fusion sequence. */
38303 void
38304 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
38305 const char *insn_str)
38307 rtx fuse_ops[10];
38308 char insn_template[80];
38310 fuse_ops[0] = load_store_reg;
38311 fuse_ops[1] = addis_reg;
38313 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38315 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38316 fuse_ops[2] = offset;
38317 output_asm_insn (insn_template, fuse_ops);
38320 else if (GET_CODE (offset) == UNSPEC
38321 && XINT (offset, 1) == UNSPEC_TOCREL)
38323 if (TARGET_ELF)
38324 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38326 else if (TARGET_XCOFF)
38327 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38329 else
38330 gcc_unreachable ();
38332 fuse_ops[2] = XVECEXP (offset, 0, 0);
38333 output_asm_insn (insn_template, fuse_ops);
38336 else if (GET_CODE (offset) == PLUS
38337 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38338 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38339 && CONST_INT_P (XEXP (offset, 1)))
38341 rtx tocrel_unspec = XEXP (offset, 0);
38342 if (TARGET_ELF)
38343 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38345 else if (TARGET_XCOFF)
38346 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38348 else
38349 gcc_unreachable ();
38351 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38352 fuse_ops[3] = XEXP (offset, 1);
38353 output_asm_insn (insn_template, fuse_ops);
38356 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38358 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38360 fuse_ops[2] = offset;
38361 output_asm_insn (insn_template, fuse_ops);
38364 else
38365 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38367 return;
38370 /* Wrap a TOC address that can be fused to indicate that special fusion
38371 processing is needed. */
38373 rtx
38374 fusion_wrap_memory_address (rtx old_mem)
38376 rtx old_addr = XEXP (old_mem, 0);
38377 rtvec v = gen_rtvec (1, old_addr);
38378 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
38379 return replace_equiv_address_nv (old_mem, new_addr, false);
38382 /* Given an address, convert it into the addis and load offset parts. Addresses
38383 created during the peephole2 process look like:
38384 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38385 (unspec [(...)] UNSPEC_TOCREL))
38387 Addresses created via toc fusion look like:
38388 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)) */
38390 static void
38391 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38393 rtx hi, lo;
38395 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
38397 lo = XVECEXP (addr, 0, 0);
38398 hi = gen_rtx_HIGH (Pmode, lo);
38400 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38402 hi = XEXP (addr, 0);
38403 lo = XEXP (addr, 1);
38405 else
38406 gcc_unreachable ();
38408 *p_hi = hi;
38409 *p_lo = lo;
38412 /* Return a string to fuse an addis instruction with a gpr load to the same
38413 register that we loaded up the addis instruction. The address that is used
38414 is the logical address that was formed during peephole2:
38415 (lo_sum (high) (low-part))
38417 Or the address is the TOC address that is wrapped before register allocation:
38418 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
38420 The code is complicated, so we call output_asm_insn directly, and just
38421 return "". */
38423 const char *
38424 emit_fusion_gpr_load (rtx target, rtx mem)
38426 rtx addis_value;
38427 rtx addr;
38428 rtx load_offset;
38429 const char *load_str = NULL;
38430 const char *mode_name = NULL;
38431 machine_mode mode;
38433 if (GET_CODE (mem) == ZERO_EXTEND)
38434 mem = XEXP (mem, 0);
38436 gcc_assert (REG_P (target) && MEM_P (mem));
38438 addr = XEXP (mem, 0);
38439 fusion_split_address (addr, &addis_value, &load_offset);
38441 /* Now emit the load instruction to the same register. */
38442 mode = GET_MODE (mem);
38443 switch (mode)
38445 case E_QImode:
38446 mode_name = "char";
38447 load_str = "lbz";
38448 break;
38450 case E_HImode:
38451 mode_name = "short";
38452 load_str = "lhz";
38453 break;
38455 case E_SImode:
38456 case E_SFmode:
38457 mode_name = (mode == SFmode) ? "float" : "int";
38458 load_str = "lwz";
38459 break;
38461 case E_DImode:
38462 case E_DFmode:
38463 gcc_assert (TARGET_POWERPC64);
38464 mode_name = (mode == DFmode) ? "double" : "long";
38465 load_str = "ld";
38466 break;
38468 default:
38469 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38472 /* Emit the addis instruction. */
38473 emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);
38475 /* Emit the D-form load instruction. */
38476 emit_fusion_load_store (target, target, load_offset, load_str);
38478 return "";
38482 /* Return true if the peephole2 can combine an addis instruction with a load
38483 or store memory operation; this form of fusion was added to the ISA 3.0
38484 (power9) hardware. */
38486 bool
38487 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38488 rtx addis_value, /* addis value. */
38489 rtx dest, /* destination (memory or register). */
38490 rtx src) /* source (register or memory). */
38492 rtx addr, mem, offset;
38493 machine_mode mode = GET_MODE (src);
38495 /* Validate arguments. */
38496 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38497 return false;
38499 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38500 return false;
38502 /* Ignore extend operations that are part of the load. */
38503 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
38504 src = XEXP (src, 0);
38506 /* Test for memory<-register or register<-memory. */
38507 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
38509 if (!MEM_P (dest))
38510 return false;
38512 mem = dest;
38515 else if (MEM_P (src))
38517 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
38518 return false;
38520 mem = src;
38523 else
38524 return false;
38526 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38527 if (GET_CODE (addr) == PLUS)
38529 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38530 return false;
38532 return satisfies_constraint_I (XEXP (addr, 1));
38535 else if (GET_CODE (addr) == LO_SUM)
38537 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38538 return false;
38540 offset = XEXP (addr, 1);
38541 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
38542 return small_toc_ref (offset, GET_MODE (offset));
38544 else if (TARGET_ELF && !TARGET_POWERPC64)
38545 return CONSTANT_P (offset);
38548 return false;
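/* Unlike the power8 GPR form, power9 fusion lets the loaded or stored
   register differ from the addis scratch register, e.g. (illustrative):

     addis 10,2,sym@toc@ha
     lfd 1,sym@toc@l(10)

   where r10 is a temporary that dies after the pair.  */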
38551 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38552 load sequence.
38554 The operands are:
38555 operands[0] register set with addis
38556 operands[1] value set via addis
38557 operands[2] target register being loaded
38558 operands[3] D-form memory reference using operands[0].
38560 This is similar to the fusion introduced with power8, except it scales to
38561 both loads/stores and does not require the result register to be the same as
38562 the base register. At the moment, we only do this if the register set with
38563 addis is dead. */
38565 void
38566 expand_fusion_p9_load (rtx *operands)
38568 rtx tmp_reg = operands[0];
38569 rtx addis_value = operands[1];
38570 rtx target = operands[2];
38571 rtx orig_mem = operands[3];
38572 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
38573 enum rtx_code plus_or_lo_sum;
38574 machine_mode target_mode = GET_MODE (target);
38575 machine_mode extend_mode = target_mode;
38576 machine_mode ptr_mode = Pmode;
38577 enum rtx_code extend = UNKNOWN;
38579 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
38581 extend = GET_CODE (orig_mem);
38582 orig_mem = XEXP (orig_mem, 0);
38583 target_mode = GET_MODE (orig_mem);
38586 gcc_assert (MEM_P (orig_mem));
38588 orig_addr = XEXP (orig_mem, 0);
38589 plus_or_lo_sum = GET_CODE (orig_addr);
38590 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38592 offset = XEXP (orig_addr, 1);
38593 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38594 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38596 if (extend != UNKNOWN)
38597 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
38599 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38600 UNSPEC_FUSION_P9);
38602 set = gen_rtx_SET (target, new_mem);
38603 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38604 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38605 emit_insn (insn);
38607 return;
38610 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38611 store sequence.
38613 The operands are:
38614 operands[0] register set with addis
38615 operands[1] value set via addis
38616 operands[2] target D-form memory being stored to
38617 operands[3] register being stored
38619 This is similar to the fusion introduced with power8, except it scales to
38620 both loads/stores and does not require the result register to be the same as
38621 the base register. At the moment, we only do this if the register set with
38622 addis is dead. */
38624 void
38625 expand_fusion_p9_store (rtx *operands)
38627 rtx tmp_reg = operands[0];
38628 rtx addis_value = operands[1];
38629 rtx orig_mem = operands[2];
38630 rtx src = operands[3];
38631 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
38632 enum rtx_code plus_or_lo_sum;
38633 machine_mode target_mode = GET_MODE (orig_mem);
38634 machine_mode ptr_mode = Pmode;
38636 gcc_assert (MEM_P (orig_mem));
38638 orig_addr = XEXP (orig_mem, 0);
38639 plus_or_lo_sum = GET_CODE (orig_addr);
38640 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38642 offset = XEXP (orig_addr, 1);
38643 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38644 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38646 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
38647 UNSPEC_FUSION_P9);
38649 set = gen_rtx_SET (new_mem, new_src);
38650 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38651 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38652 emit_insn (insn);
38654 return;
/* Return a string to fuse an addis instruction with a load using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2: (lo_sum (high) (low-part)).

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */
const char *
emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *load_string;
  int r;

  if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
    {
      mem = XEXP (mem, 0);
      mode = GET_MODE (mem);
    }

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
        load_string = "lfs";
      else if (mode == DFmode || mode == DImode)
        load_string = "lfd";
      else
        gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
        load_string = "lxssp";
      else if (mode == DFmode || mode == DImode)
        load_string = "lxsd";
      else
        gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
        {
        case E_QImode:
          load_string = "lbz";
          break;
        case E_HImode:
          load_string = "lhz";
          break;
        case E_SImode:
        case E_SFmode:
          load_string = "lwz";
          break;
        case E_DImode:
        case E_DFmode:
          if (!TARGET_POWERPC64)
            gcc_unreachable ();
          load_string = "ld";
          break;
        default:
          gcc_unreachable ();
        }
    }
  else
    fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_load not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));

  /* Emit the D-form load instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, load_string);

  return "";
}
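/* Illustrative example of the output (symbol and register numbers are
   made up): for an SImode load into a GPR, the two emit_fusion_* calls
   above produce an addis/D-form pair along the lines of

        addis 9,2,.LC0@toc@ha
        lwz 3,.LC0@toc@l(9)

   with r9 playing the role of TMP_REG and "lwz" coming from the
   load_string table above.  */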
/* Return a string to fuse an addis instruction with a store using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2: (lo_sum (high) (low-part)).

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */
const char *
emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *store_string;
  int r;

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
        store_string = "stfs";
      else if (mode == DFmode)
        store_string = "stfd";
      else
        gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
        store_string = "stxssp";
      else if (mode == DFmode || mode == DImode)
        store_string = "stxsd";
      else
        gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
        {
        case E_QImode:
          store_string = "stb";
          break;
        case E_HImode:
          store_string = "sth";
          break;
        case E_SImode:
        case E_SFmode:
          store_string = "stw";
          break;
        case E_DImode:
        case E_DFmode:
          if (!TARGET_POWERPC64)
            gcc_unreachable ();
          store_string = "std";
          break;
        default:
          gcc_unreachable ();
        }
    }
  else
    fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_store not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));

  /* Emit the D-form store instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, store_string);

  return "";
}
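/* Illustrative example of the output (symbol and register numbers are
   made up): for an SImode store from a GPR, the sequence is the store
   counterpart of the load example above:

        addis 9,2,.LC0@toc@ha
        stw 3,.LC0@toc@l(9)  */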
#ifdef RS6000_GLIBC_ATOMIC_FENV
/* Function declarations for rs6000_atomic_assign_expand_fenv.  */
static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
#endif
/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_HARD_FLOAT)
    {
#ifdef RS6000_GLIBC_ATOMIC_FENV
      if (atomic_hold_decl == NULL_TREE)
        {
          atomic_hold_decl
            = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                          get_identifier ("__atomic_feholdexcept"),
                          build_function_type_list (void_type_node,
                                                    double_ptr_type_node,
                                                    NULL_TREE));
          TREE_PUBLIC (atomic_hold_decl) = 1;
          DECL_EXTERNAL (atomic_hold_decl) = 1;
        }

      if (atomic_clear_decl == NULL_TREE)
        {
          atomic_clear_decl
            = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                          get_identifier ("__atomic_feclearexcept"),
                          build_function_type_list (void_type_node,
                                                    NULL_TREE));
          TREE_PUBLIC (atomic_clear_decl) = 1;
          DECL_EXTERNAL (atomic_clear_decl) = 1;
        }

      tree const_double = build_qualified_type (double_type_node,
                                                TYPE_QUAL_CONST);
      tree const_double_ptr = build_pointer_type (const_double);
      if (atomic_update_decl == NULL_TREE)
        {
          atomic_update_decl
            = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                          get_identifier ("__atomic_feupdateenv"),
                          build_function_type_list (void_type_node,
                                                    const_double_ptr,
                                                    NULL_TREE));
          TREE_PUBLIC (atomic_update_decl) = 1;
          DECL_EXTERNAL (atomic_update_decl) = 1;
        }

      tree fenv_var = create_tmp_var_raw (double_type_node);
      TREE_ADDRESSABLE (fenv_var) = 1;
      tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);

      *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
      *clear = build_call_expr (atomic_clear_decl, 0);
      *update = build_call_expr (atomic_update_decl, 1,
                                 fold_convert (const_double_ptr, fenv_addr));
#endif
      return;
    }

  tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
  tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
  tree call_mffs = build_call_expr (mffs, 0);

  /* Generates the equivalent of feholdexcept (&fenv_var):

        fenv_var = __builtin_mffs ();
        double fenv_hold;
        *(uint64_t*)&fenv_hold = *(uint64_t*)&fenv_var & 0xffffffff00000007LL;
        __builtin_mtfsf (0xff, fenv_hold);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT hold_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000007);

  tree fenv_var = create_tmp_var_raw (double_type_node);

  tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);

  tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
  tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
                              build_int_cst (uint64_type_node,
                                             hold_exception_mask));

  tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
                                 fenv_llu_and);

  tree hold_mtfsf = build_call_expr (mtfsf, 2,
                                     build_int_cst (unsigned_type_node, 0xff),
                                     fenv_hold_mtfsf);

  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

        double fenv_clear = __builtin_mffs ();
        *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
        __builtin_mtfsf (0xff, fenv_clear);  */

  /* Mask that zeroes the entire lower word of the FPSCR, clearing the
     exception status and enable bits.  */
  const unsigned HOST_WIDE_INT clear_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000000);

  tree fenv_clear = create_tmp_var_raw (double_type_node);

  tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);

  tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
  tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
                                    fenv_clean_llu,
                                    build_int_cst (uint64_type_node,
                                                   clear_exception_mask));

  tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
                                  fenv_clear_llu_and);

  tree clear_mtfsf = build_call_expr (mtfsf, 2,
                                      build_int_cst (unsigned_type_node, 0xff),
                                      fenv_clear_mtfsf);

  *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);

  /* Generates the equivalent of feupdateenv (&fenv_var):

        double old_fenv = __builtin_mffs ();
        double fenv_update;
        *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL)
                                   | (*(uint64_t*)&fenv_var & 0x1ff80fff);
        __builtin_mtfsf (0xff, fenv_update);  */

  const unsigned HOST_WIDE_INT update_exception_mask =
    HOST_WIDE_INT_C (0xffffffff1fffff00);
  const unsigned HOST_WIDE_INT new_exception_mask =
    HOST_WIDE_INT_C (0x1ff80fff);

  tree old_fenv = create_tmp_var_raw (double_type_node);
  tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);

  tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
  tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
                             build_int_cst (uint64_type_node,
                                            update_exception_mask));

  tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
                             build_int_cst (uint64_type_node,
                                            new_exception_mask));

  tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
                              old_llu_and, new_llu_and);

  tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
                                   new_llu_mask);

  tree update_mtfsf = build_call_expr (mtfsf, 2,
                                       build_int_cst (unsigned_type_node, 0xff),
                                       fenv_update_mtfsf);

  *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
}
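/* For reference, the middle end expands a C11 atomic compound assignment
   on a floating-point type roughly as follows (pseudo-code, not the exact
   expansion):

        HOLD;       save the FP environment and disable exception traps
        retry:
          new = old <op> operand;
          if (!compare_and_swap (mem, &old, new))
            { CLEAR; goto retry; }   discard exceptions of the failed try
        UPDATE;     restore the environment and re-raise the exceptions
                    raised by the successful iteration  */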
/* Convert the 64-bit integer elements of the V2DImode vectors SRC1 and
   SRC2 to single precision and interleave the results into the V4SFmode
   vector DST.  SIGNED_CONVERT selects signed rather than unsigned
   conversion.  */

void
rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DImode);
  rtx_tmp1 = gen_reg_rtx (V2DImode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
     the vmrgew instruction will be correct.  */
  if (VECTOR_ELT_ORDER_BIG)
    {
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
    }

  if (VECTOR_ELT_ORDER_BIG)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
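/* In scalar terms the net effect is (illustrative, for V2DI inputs A and
   B and the signed case):

        dst = { (float) A[0], (float) B[0], (float) A[1], (float) B[1] };

   xxpermdi pairs up the doublewords, xvcvsxdsp/xvcvuxdsp convert them,
   and vmrgew interleaves the converted words into DST.  */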
/* Convert the double precision elements of the V2DFmode vectors SRC1 and
   SRC2 to 32-bit integers and interleave the results into the V4SImode
   vector DST.  SIGNED_CONVERT selects signed rather than unsigned
   conversion.  */

void
rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
                               rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));

  rtx_tmp2 = gen_reg_rtx (V4SImode);
  rtx_tmp3 = gen_reg_rtx (V4SImode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
    }

  emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
}
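/* In scalar terms the net effect is (illustrative, for V2DF inputs A and
   B and the signed case):

        dst = { (int) A[0], (int) B[0], (int) A[1], (int) B[1] };

   using the same xxpermdi + convert + vmrgew pattern as
   rs6000_generate_float2_code above.  */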
/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */

static bool
rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
                          optimization_type opt_type)
{
  switch (op)
    {
    case rsqrt_optab:
      return (opt_type == OPTIMIZE_FOR_SPEED
              && RS6000_RECIP_AUTO_RSQRTE_P (mode1));

    default:
      return true;
    }
}
struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"